summaryrefslogtreecommitdiff
path: root/src/mongo/db/storage
diff options
context:
space:
mode:
authorMaria van Keulen <maria@mongodb.com>2017-03-07 12:00:08 -0500
committerMaria van Keulen <maria@mongodb.com>2017-03-07 12:00:08 -0500
commit589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79 (patch)
treec7a090ffdd56a91ae677e2492c61b820af44f964 /src/mongo/db/storage
parent3cba97198638df3750e3b455e2ad57af7ee536ae (diff)
downloadmongo-589a5c169ced8f6e9ddcd3d182ae1b75db6b7d79.tar.gz
SERVER-27938 Rename all OperationContext variables to opCtx
This commit is an automated rename of all whole word instances of txn, _txn, and txnPtr to opCtx, _opCtx, and opCtxPtr, respectively in all .cpp and .h files in src/mongo.
Diffstat (limited to 'src/mongo/db/storage')
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.cpp32
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.h18
-rw-r--r--src/mongo/db/storage/capped_callback.h2
-rw-r--r--src/mongo/db/storage/devnull/devnull_kv_engine.cpp58
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp46
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h4
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp85
-rw-r--r--src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h44
-rw-r--r--src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp70
-rw-r--r--src/mongo/db/storage/kv/kv_collection_catalog_entry.h18
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry.cpp6
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry.h2
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp36
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry_base.h6
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry_mock.cpp2
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry_mock.h2
-rw-r--r--src/mongo/db/storage/kv/kv_engine.h6
-rw-r--r--src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp27
-rw-r--r--src/mongo/db/storage/kv/kv_storage_engine.cpp28
-rw-r--r--src/mongo/db/storage/kv/kv_storage_engine.h12
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp95
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp460
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic.h118
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp496
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp26
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_test_help.h10
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/hashtab.h8
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp38
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details.h21
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp165
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h42
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp61
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h32
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp39
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_index.h18
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file.cpp31
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file.h18
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file_sync.cpp6
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file_sync.h3
-rw-r--r--src/mongo/db/storage/mmap_v1/dur.cpp51
-rw-r--r--src/mongo/db/storage/mmap_v1/dur.h6
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp4
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recover.cpp75
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recover.h22
-rw-r--r--src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp20
-rw-r--r--src/mongo/db/storage/mmap_v1/durable_mapped_file.h10
-rw-r--r--src/mongo/db/storage/mmap_v1/extent_manager.h16
-rw-r--r--src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp16
-rw-r--r--src/mongo/db/storage/mmap_v1/heap_record_store_btree.h38
-rw-r--r--src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap.cpp48
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap.h54
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_posix.cpp16
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp184
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h34
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp30
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_engine.h14
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp130
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h32
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp18
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_windows.cpp32
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp249
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_base.h112
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp221
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped.h38
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp22
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h12
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp302
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp16
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h12
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp142
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple.h20
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp24
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h12
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp146
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp90
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h55
-rw-r--r--src/mongo/db/storage/mmap_v1/repair_database.cpp64
-rw-r--r--src/mongo/db/storage/record_fetcher.h2
-rw-r--r--src/mongo/db/storage/record_store.h70
-rw-r--r--src/mongo/db/storage/record_store_test_capped_visibility.cpp58
-rw-r--r--src/mongo/db/storage/record_store_test_updaterecord.h10
-rw-r--r--src/mongo/db/storage/snapshot_manager.h4
-rw-r--r--src/mongo/db/storage/sorted_data_interface.h43
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_harness.cpp12
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_harness.h4
-rw-r--r--src/mongo/db/storage/storage_engine.h12
-rw-r--r--src/mongo/db/storage/storage_init.cpp5
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp122
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.h30
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h6
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp326
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h70
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp16
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h12
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp28
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp4
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h3
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp9
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_util.h2
108 files changed, 2772 insertions, 2702 deletions
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index 7837e898b56..f7c3eb3d2e0 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -102,19 +102,19 @@ void parseMultikeyPathsFromBytes(BSONObj multikeyPathsObj, MultikeyPaths* multik
BSONCollectionCatalogEntry::BSONCollectionCatalogEntry(StringData ns)
: CollectionCatalogEntry(ns) {}
-CollectionOptions BSONCollectionCatalogEntry::getCollectionOptions(OperationContext* txn) const {
- MetaData md = _getMetaData(txn);
+CollectionOptions BSONCollectionCatalogEntry::getCollectionOptions(OperationContext* opCtx) const {
+ MetaData md = _getMetaData(opCtx);
return md.options;
}
-int BSONCollectionCatalogEntry::getTotalIndexCount(OperationContext* txn) const {
- MetaData md = _getMetaData(txn);
+int BSONCollectionCatalogEntry::getTotalIndexCount(OperationContext* opCtx) const {
+ MetaData md = _getMetaData(opCtx);
return static_cast<int>(md.indexes.size());
}
-int BSONCollectionCatalogEntry::getCompletedIndexCount(OperationContext* txn) const {
- MetaData md = _getMetaData(txn);
+int BSONCollectionCatalogEntry::getCompletedIndexCount(OperationContext* opCtx) const {
+ MetaData md = _getMetaData(opCtx);
int num = 0;
for (unsigned i = 0; i < md.indexes.size(); i++) {
@@ -124,9 +124,9 @@ int BSONCollectionCatalogEntry::getCompletedIndexCount(OperationContext* txn) co
return num;
}
-BSONObj BSONCollectionCatalogEntry::getIndexSpec(OperationContext* txn,
+BSONObj BSONCollectionCatalogEntry::getIndexSpec(OperationContext* opCtx,
StringData indexName) const {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
@@ -134,19 +134,19 @@ BSONObj BSONCollectionCatalogEntry::getIndexSpec(OperationContext* txn,
}
-void BSONCollectionCatalogEntry::getAllIndexes(OperationContext* txn,
+void BSONCollectionCatalogEntry::getAllIndexes(OperationContext* opCtx,
std::vector<std::string>* names) const {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
for (unsigned i = 0; i < md.indexes.size(); i++) {
names->push_back(md.indexes[i].spec["name"].String());
}
}
-bool BSONCollectionCatalogEntry::isIndexMultikey(OperationContext* txn,
+bool BSONCollectionCatalogEntry::isIndexMultikey(OperationContext* opCtx,
StringData indexName,
MultikeyPaths* multikeyPaths) const {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
@@ -158,17 +158,17 @@ bool BSONCollectionCatalogEntry::isIndexMultikey(OperationContext* txn,
return md.indexes[offset].multikey;
}
-RecordId BSONCollectionCatalogEntry::getIndexHead(OperationContext* txn,
+RecordId BSONCollectionCatalogEntry::getIndexHead(OperationContext* opCtx,
StringData indexName) const {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
return md.indexes[offset].head;
}
-bool BSONCollectionCatalogEntry::isIndexReady(OperationContext* txn, StringData indexName) const {
- MetaData md = _getMetaData(txn);
+bool BSONCollectionCatalogEntry::isIndexReady(OperationContext* opCtx, StringData indexName) const {
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h
index 83c2238fc17..f2dbd891dd7 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.h
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.h
@@ -48,23 +48,23 @@ public:
virtual ~BSONCollectionCatalogEntry() {}
- virtual CollectionOptions getCollectionOptions(OperationContext* txn) const;
+ virtual CollectionOptions getCollectionOptions(OperationContext* opCtx) const;
- virtual int getTotalIndexCount(OperationContext* txn) const;
+ virtual int getTotalIndexCount(OperationContext* opCtx) const;
- virtual int getCompletedIndexCount(OperationContext* txn) const;
+ virtual int getCompletedIndexCount(OperationContext* opCtx) const;
- virtual BSONObj getIndexSpec(OperationContext* txn, StringData idxName) const;
+ virtual BSONObj getIndexSpec(OperationContext* opCtx, StringData idxName) const;
- virtual void getAllIndexes(OperationContext* txn, std::vector<std::string>* names) const;
+ virtual void getAllIndexes(OperationContext* opCtx, std::vector<std::string>* names) const;
- virtual bool isIndexMultikey(OperationContext* txn,
+ virtual bool isIndexMultikey(OperationContext* opCtx,
StringData indexName,
MultikeyPaths* multikeyPaths) const;
- virtual RecordId getIndexHead(OperationContext* txn, StringData indexName) const;
+ virtual RecordId getIndexHead(OperationContext* opCtx, StringData indexName) const;
- virtual bool isIndexReady(OperationContext* txn, StringData indexName) const;
+ virtual bool isIndexReady(OperationContext* opCtx, StringData indexName) const;
// ------ for implementors
@@ -111,6 +111,6 @@ public:
};
protected:
- virtual MetaData _getMetaData(OperationContext* txn) const = 0;
+ virtual MetaData _getMetaData(OperationContext* opCtx) const = 0;
};
}
diff --git a/src/mongo/db/storage/capped_callback.h b/src/mongo/db/storage/capped_callback.h
index 21d5c0bad5c..dab688ba711 100644
--- a/src/mongo/db/storage/capped_callback.h
+++ b/src/mongo/db/storage/capped_callback.h
@@ -50,7 +50,7 @@ public:
* If data is unowned, it is only valid inside of this call. If implementations wish to
* stash a pointer, they must copy it.
*/
- virtual Status aboutToDeleteCapped(OperationContext* txn,
+ virtual Status aboutToDeleteCapped(OperationContext* opCtx,
const RecordId& loc,
RecordData data) = 0;
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
index 6210971ae84..8d22cdf0224 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
@@ -51,7 +51,7 @@ public:
return true;
}
void detachFromOperationContext() final {}
- void reattachToOperationContext(OperationContext* txn) final {}
+ void reattachToOperationContext(OperationContext* opCtx) final {}
};
class DevNullRecordStore : public RecordStore {
@@ -68,11 +68,11 @@ public:
virtual void setCappedCallback(CappedCallback*) {}
- virtual long long dataSize(OperationContext* txn) const {
+ virtual long long dataSize(OperationContext* opCtx) const {
return 0;
}
- virtual long long numRecords(OperationContext* txn) const {
+ virtual long long numRecords(OperationContext* opCtx) const {
return 0;
}
@@ -80,23 +80,23 @@ public:
return _options.capped;
}
- virtual int64_t storageSize(OperationContext* txn,
+ virtual int64_t storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo = NULL,
int infoLevel = 0) const {
return 0;
}
- virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const {
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const {
return RecordData(_dummy.objdata(), _dummy.objsize());
}
- virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* rd) const {
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const {
return false;
}
- virtual void deleteRecord(OperationContext* txn, const RecordId& dl) {}
+ virtual void deleteRecord(OperationContext* opCtx, const RecordId& dl) {}
- virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) {
@@ -104,7 +104,7 @@ public:
return StatusWith<RecordId>(RecordId(6, 4));
}
- virtual Status insertRecordsWithDocWriter(OperationContext* txn,
+ virtual Status insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut) {
@@ -117,7 +117,7 @@ public:
return Status::OK();
}
- virtual Status updateRecord(OperationContext* txn,
+ virtual Status updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int len,
@@ -130,7 +130,7 @@ public:
return false;
}
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn,
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
@@ -139,18 +139,18 @@ public:
}
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final {
return stdx::make_unique<EmptyRecordCursor>();
}
- virtual Status truncate(OperationContext* txn) {
+ virtual Status truncate(OperationContext* opCtx) {
return Status::OK();
}
- virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {}
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {}
- virtual Status validate(OperationContext* txn,
+ virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
@@ -158,19 +158,19 @@ public:
return Status::OK();
}
- virtual void appendCustomStats(OperationContext* txn,
+ virtual void appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const {
result->appendNumber("numInserts", _numInserts);
}
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const {
+ virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const {
return Status::OK();
}
- void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const override {}
+ void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override {}
- virtual void updateStatsAfterRepair(OperationContext* txn,
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize) {}
@@ -195,50 +195,50 @@ class DevNullSortedDataInterface : public SortedDataInterface {
public:
virtual ~DevNullSortedDataInterface() {}
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) {
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) {
return new DevNullSortedDataBuilderInterface();
}
- virtual Status insert(OperationContext* txn,
+ virtual Status insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
return Status::OK();
}
- virtual void unindex(OperationContext* txn,
+ virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {}
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
+ virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key, const RecordId& loc) {
return Status::OK();
}
- virtual void fullValidate(OperationContext* txn,
+ virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const {}
- virtual bool appendCustomStats(OperationContext* txn,
+ virtual bool appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const {
return false;
}
- virtual long long getSpaceUsedBytes(OperationContext* txn) const {
+ virtual long long getSpaceUsedBytes(OperationContext* opCtx) const {
return 0;
}
- virtual bool isEmpty(OperationContext* txn) {
+ virtual bool isEmpty(OperationContext* opCtx) {
return true;
}
- virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool isForward) const {
return {};
}
- virtual Status initAsEmpty(OperationContext* txn) {
+ virtual Status initAsEmpty(OperationContext* opCtx) {
return Status::OK();
}
};
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
index a036b05c44e..4488c730431 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
@@ -142,11 +142,11 @@ public:
_currentKeySize = 0;
}
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) {
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) {
return new EphemeralForTestBtreeBuilderImpl(_data, &_currentKeySize, dupsAllowed);
}
- virtual Status insert(OperationContext* txn,
+ virtual Status insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
@@ -167,12 +167,12 @@ public:
IndexKeyEntry entry(key.getOwned(), loc);
if (_data->insert(entry).second) {
_currentKeySize += key.objsize();
- txn->recoveryUnit()->registerChange(new IndexChange(_data, entry, true));
+ opCtx->recoveryUnit()->registerChange(new IndexChange(_data, entry, true));
}
return Status::OK();
}
- virtual void unindex(OperationContext* txn,
+ virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
@@ -184,47 +184,51 @@ public:
invariant(numDeleted <= 1);
if (numDeleted == 1) {
_currentKeySize -= key.objsize();
- txn->recoveryUnit()->registerChange(new IndexChange(_data, entry, false));
+ opCtx->recoveryUnit()->registerChange(new IndexChange(_data, entry, false));
}
}
- virtual void fullValidate(OperationContext* txn,
+ virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const {
// TODO check invariants?
*numKeysOut = _data->size();
}
- virtual bool appendCustomStats(OperationContext* txn,
+ virtual bool appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const {
return false;
}
- virtual long long getSpaceUsedBytes(OperationContext* txn) const {
+ virtual long long getSpaceUsedBytes(OperationContext* opCtx) const {
return _currentKeySize + (sizeof(IndexKeyEntry) * _data->size());
}
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
+ virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key, const RecordId& loc) {
invariant(!hasFieldNames(key));
if (isDup(*_data, key, loc))
return dupKeyError(key);
return Status::OK();
}
- virtual bool isEmpty(OperationContext* txn) {
+ virtual bool isEmpty(OperationContext* opCtx) {
return _data->empty();
}
- virtual Status touch(OperationContext* txn) const {
+ virtual Status touch(OperationContext* opCtx) const {
// already in memory...
return Status::OK();
}
class Cursor final : public SortedDataInterface::Cursor {
public:
- Cursor(OperationContext* txn, const IndexSet& data, bool isForward, bool isUnique)
- : _txn(txn), _data(data), _forward(isForward), _isUnique(isUnique), _it(data.end()) {}
+ Cursor(OperationContext* opCtx, const IndexSet& data, bool isForward, bool isUnique)
+ : _opCtx(opCtx),
+ _data(data),
+ _forward(isForward),
+ _isUnique(isUnique),
+ _it(data.end()) {}
boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
if (_lastMoveWasRestore) {
@@ -291,7 +295,7 @@ public:
void save() override {
// Keep original position if we haven't moved since the last restore.
- _txn = nullptr;
+ _opCtx = nullptr;
if (_lastMoveWasRestore)
return;
@@ -340,11 +344,11 @@ public:
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
private:
@@ -440,7 +444,7 @@ public:
_endState->it = it;
}
- OperationContext* _txn; // not owned
+ OperationContext* _opCtx; // not owned
const IndexSet& _data;
const bool _forward;
const bool _isUnique;
@@ -466,12 +470,12 @@ public:
RecordId _savedLoc;
};
- virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool isForward) const {
- return stdx::make_unique<Cursor>(txn, *_data, isForward, _isUnique);
+ return stdx::make_unique<Cursor>(opCtx, *_data, isForward, _isUnique);
}
- virtual Status initAsEmpty(OperationContext* txn) {
+ virtual Status initAsEmpty(OperationContext* opCtx) {
// No-op
return Status::OK();
}
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
index bfa58ff7e15..f2544ba5d1d 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_engine.h
@@ -63,11 +63,11 @@ public:
StringData ident,
const IndexDescriptor* desc);
- virtual Status beginBackup(OperationContext* txn) {
+ virtual Status beginBackup(OperationContext* opCtx) {
return Status::OK();
}
- virtual void endBackup(OperationContext* txn) {}
+ virtual void endBackup(OperationContext* opCtx) {}
virtual Status dropIdent(OperationContext* opCtx, StringData ident);
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
index 111242d5784..7a06bea78bc 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.cpp
@@ -111,7 +111,7 @@ private:
class EphemeralForTestRecordStore::Cursor final : public SeekableRecordCursor {
public:
- Cursor(OperationContext* txn, const EphemeralForTestRecordStore& rs)
+ Cursor(OperationContext* opCtx, const EphemeralForTestRecordStore& rs)
: _records(rs._data->records), _isCapped(rs.isCapped()) {}
boost::optional<Record> next() final {
@@ -160,7 +160,7 @@ public:
}
void detachFromOperationContext() final {}
- void reattachToOperationContext(OperationContext* txn) final {}
+ void reattachToOperationContext(OperationContext* opCtx) final {}
private:
Records::const_iterator _it;
@@ -174,7 +174,7 @@ private:
class EphemeralForTestRecordStore::ReverseCursor final : public SeekableRecordCursor {
public:
- ReverseCursor(OperationContext* txn, const EphemeralForTestRecordStore& rs)
+ ReverseCursor(OperationContext* opCtx, const EphemeralForTestRecordStore& rs)
: _records(rs._data->records), _isCapped(rs.isCapped()) {}
boost::optional<Record> next() final {
@@ -236,7 +236,7 @@ public:
}
void detachFromOperationContext() final {}
- void reattachToOperationContext(OperationContext* txn) final {}
+ void reattachToOperationContext(OperationContext* opCtx) final {}
private:
Records::const_reverse_iterator _it;
@@ -282,7 +282,8 @@ const char* EphemeralForTestRecordStore::name() const {
return "EphemeralForTest";
}
-RecordData EphemeralForTestRecordStore::dataFor(OperationContext* txn, const RecordId& loc) const {
+RecordData EphemeralForTestRecordStore::dataFor(OperationContext* opCtx,
+ const RecordId& loc) const {
return recordFor(loc)->toRecordData();
}
@@ -308,7 +309,7 @@ EphemeralForTestRecordStore::EphemeralForTestRecord* EphemeralForTestRecordStore
return &it->second;
}
-bool EphemeralForTestRecordStore::findRecord(OperationContext* txn,
+bool EphemeralForTestRecordStore::findRecord(OperationContext* opCtx,
const RecordId& loc,
RecordData* rd) const {
Records::const_iterator it = _data->records.find(loc);
@@ -319,28 +320,28 @@ bool EphemeralForTestRecordStore::findRecord(OperationContext* txn,
return true;
}
-void EphemeralForTestRecordStore::deleteRecord(OperationContext* txn, const RecordId& loc) {
+void EphemeralForTestRecordStore::deleteRecord(OperationContext* opCtx, const RecordId& loc) {
EphemeralForTestRecord* rec = recordFor(loc);
- txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *rec));
+ opCtx->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *rec));
_data->dataSize -= rec->size;
invariant(_data->records.erase(loc) == 1);
}
-bool EphemeralForTestRecordStore::cappedAndNeedDelete(OperationContext* txn) const {
+bool EphemeralForTestRecordStore::cappedAndNeedDelete(OperationContext* opCtx) const {
if (!_isCapped)
return false;
if (_data->dataSize > _cappedMaxSize)
return true;
- if ((_cappedMaxDocs != -1) && (numRecords(txn) > _cappedMaxDocs))
+ if ((_cappedMaxDocs != -1) && (numRecords(opCtx) > _cappedMaxDocs))
return true;
return false;
}
-void EphemeralForTestRecordStore::cappedDeleteAsNeeded(OperationContext* txn) {
- while (cappedAndNeedDelete(txn)) {
+void EphemeralForTestRecordStore::cappedDeleteAsNeeded(OperationContext* opCtx) {
+ while (cappedAndNeedDelete(opCtx)) {
invariant(!_data->records.empty());
Records::iterator oldest = _data->records.begin();
@@ -348,9 +349,9 @@ void EphemeralForTestRecordStore::cappedDeleteAsNeeded(OperationContext* txn) {
RecordData data = oldest->second.toRecordData();
if (_cappedCallback)
- uassertStatusOK(_cappedCallback->aboutToDeleteCapped(txn, id, data));
+ uassertStatusOK(_cappedCallback->aboutToDeleteCapped(opCtx, id, data));
- deleteRecord(txn, id);
+ deleteRecord(opCtx, id);
}
}
@@ -366,7 +367,7 @@ StatusWith<RecordId> EphemeralForTestRecordStore::extractAndCheckLocForOplog(con
return status;
}
-StatusWith<RecordId> EphemeralForTestRecordStore::insertRecord(OperationContext* txn,
+StatusWith<RecordId> EphemeralForTestRecordStore::insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) {
@@ -389,16 +390,16 @@ StatusWith<RecordId> EphemeralForTestRecordStore::insertRecord(OperationContext*
loc = allocateLoc();
}
- txn->recoveryUnit()->registerChange(new InsertChange(_data, loc));
+ opCtx->recoveryUnit()->registerChange(new InsertChange(_data, loc));
_data->dataSize += len;
_data->records[loc] = rec;
- cappedDeleteAsNeeded(txn);
+ cappedDeleteAsNeeded(opCtx);
return StatusWith<RecordId>(loc);
}
-Status EphemeralForTestRecordStore::insertRecordsWithDocWriter(OperationContext* txn,
+Status EphemeralForTestRecordStore::insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut) {
@@ -423,11 +424,11 @@ Status EphemeralForTestRecordStore::insertRecordsWithDocWriter(OperationContext*
loc = allocateLoc();
}
- txn->recoveryUnit()->registerChange(new InsertChange(_data, loc));
+ opCtx->recoveryUnit()->registerChange(new InsertChange(_data, loc));
_data->dataSize += len;
_data->records[loc] = rec;
- cappedDeleteAsNeeded(txn);
+ cappedDeleteAsNeeded(opCtx);
if (idsOut)
idsOut[i] = loc;
@@ -436,7 +437,7 @@ Status EphemeralForTestRecordStore::insertRecordsWithDocWriter(OperationContext*
return Status::OK();
}
-Status EphemeralForTestRecordStore::updateRecord(OperationContext* txn,
+Status EphemeralForTestRecordStore::updateRecord(OperationContext* opCtx,
const RecordId& loc,
const char* data,
int len,
@@ -451,7 +452,7 @@ Status EphemeralForTestRecordStore::updateRecord(OperationContext* txn,
if (notifier) {
// The in-memory KV engine uses the invalidation framework (does not support
// doc-locking), and therefore must notify that it is updating a document.
- Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(txn, loc);
+ Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(opCtx, loc);
if (!callbackStatus.isOK()) {
return callbackStatus;
}
@@ -460,11 +461,11 @@ Status EphemeralForTestRecordStore::updateRecord(OperationContext* txn,
EphemeralForTestRecord newRecord(len);
memcpy(newRecord.data.get(), data, len);
- txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
+ opCtx->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
_data->dataSize += len - oldLen;
*oldRecord = newRecord;
- cappedDeleteAsNeeded(txn);
+ cappedDeleteAsNeeded(opCtx);
return Status::OK();
}
@@ -474,7 +475,7 @@ bool EphemeralForTestRecordStore::updateWithDamagesSupported() const {
}
StatusWith<RecordData> EphemeralForTestRecordStore::updateWithDamages(
- OperationContext* txn,
+ OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
@@ -485,10 +486,10 @@ StatusWith<RecordData> EphemeralForTestRecordStore::updateWithDamages(
EphemeralForTestRecord newRecord(len);
memcpy(newRecord.data.get(), oldRecord->data.get(), len);
- txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
+ opCtx->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
*oldRecord = newRecord;
- cappedDeleteAsNeeded(txn);
+ cappedDeleteAsNeeded(opCtx);
char* root = newRecord.data.get();
mutablebson::DamageVector::const_iterator where = damages.begin();
@@ -504,33 +505,33 @@ StatusWith<RecordData> EphemeralForTestRecordStore::updateWithDamages(
return newRecord.toRecordData();
}
-std::unique_ptr<SeekableRecordCursor> EphemeralForTestRecordStore::getCursor(OperationContext* txn,
- bool forward) const {
+std::unique_ptr<SeekableRecordCursor> EphemeralForTestRecordStore::getCursor(
+ OperationContext* opCtx, bool forward) const {
if (forward)
- return stdx::make_unique<Cursor>(txn, *this);
- return stdx::make_unique<ReverseCursor>(txn, *this);
+ return stdx::make_unique<Cursor>(opCtx, *this);
+ return stdx::make_unique<ReverseCursor>(opCtx, *this);
}
-Status EphemeralForTestRecordStore::truncate(OperationContext* txn) {
+Status EphemeralForTestRecordStore::truncate(OperationContext* opCtx) {
// Unlike other changes, TruncateChange mutates _data on construction to perform the
// truncate
- txn->recoveryUnit()->registerChange(new TruncateChange(_data));
+ opCtx->recoveryUnit()->registerChange(new TruncateChange(_data));
return Status::OK();
}
-void EphemeralForTestRecordStore::cappedTruncateAfter(OperationContext* txn,
+void EphemeralForTestRecordStore::cappedTruncateAfter(OperationContext* opCtx,
RecordId end,
bool inclusive) {
Records::iterator it =
inclusive ? _data->records.lower_bound(end) : _data->records.upper_bound(end);
while (it != _data->records.end()) {
- txn->recoveryUnit()->registerChange(new RemoveChange(_data, it->first, it->second));
+ opCtx->recoveryUnit()->registerChange(new RemoveChange(_data, it->first, it->second));
_data->dataSize -= it->second.size;
_data->records.erase(it++);
}
}
-Status EphemeralForTestRecordStore::validate(OperationContext* txn,
+Status EphemeralForTestRecordStore::validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
@@ -558,7 +559,7 @@ Status EphemeralForTestRecordStore::validate(OperationContext* txn,
return Status::OK();
}
-void EphemeralForTestRecordStore::appendCustomStats(OperationContext* txn,
+void EphemeralForTestRecordStore::appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const {
result->appendBool("capped", _isCapped);
@@ -568,7 +569,7 @@ void EphemeralForTestRecordStore::appendCustomStats(OperationContext* txn,
}
}
-Status EphemeralForTestRecordStore::touch(OperationContext* txn, BSONObjBuilder* output) const {
+Status EphemeralForTestRecordStore::touch(OperationContext* opCtx, BSONObjBuilder* output) const {
if (output) {
output->append("numRanges", 1);
output->append("millis", 0);
@@ -576,18 +577,18 @@ Status EphemeralForTestRecordStore::touch(OperationContext* txn, BSONObjBuilder*
return Status::OK();
}
-void EphemeralForTestRecordStore::increaseStorageSize(OperationContext* txn,
+void EphemeralForTestRecordStore::increaseStorageSize(OperationContext* opCtx,
int size,
bool enforceQuota) {
// unclear what this would mean for this class. For now, just error if called.
invariant(!"increaseStorageSize not yet implemented");
}
-int64_t EphemeralForTestRecordStore::storageSize(OperationContext* txn,
+int64_t EphemeralForTestRecordStore::storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo,
int infoLevel) const {
// Note: not making use of extraInfo or infoLevel since we don't have extents
- const int64_t recordOverhead = numRecords(txn) * sizeof(EphemeralForTestRecord);
+ const int64_t recordOverhead = numRecords(opCtx) * sizeof(EphemeralForTestRecord);
return _data->dataSize + recordOverhead;
}
@@ -598,7 +599,7 @@ RecordId EphemeralForTestRecordStore::allocateLoc() {
}
boost::optional<RecordId> EphemeralForTestRecordStore::oplogStartHack(
- OperationContext* txn, const RecordId& startingPosition) const {
+ OperationContext* opCtx, const RecordId& startingPosition) const {
if (!_data->isOplog)
return boost::none;
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
index 33168e1dc99..b9d44c6905e 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h
@@ -54,23 +54,23 @@ public:
virtual const char* name() const;
- virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const;
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const;
- virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* rd) const;
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const;
- virtual void deleteRecord(OperationContext* txn, const RecordId& dl);
+ virtual void deleteRecord(OperationContext* opCtx, const RecordId& dl);
- virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota);
- virtual Status insertRecordsWithDocWriter(OperationContext* txn,
+ virtual Status insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut);
- virtual Status updateRecord(OperationContext* txn,
+ virtual Status updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int len,
@@ -79,51 +79,51 @@ public:
virtual bool updateWithDamagesSupported() const;
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn,
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages);
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final;
- virtual Status truncate(OperationContext* txn);
+ virtual Status truncate(OperationContext* opCtx);
- virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive);
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive);
- virtual Status validate(OperationContext* txn,
+ virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
BSONObjBuilder* output);
- virtual void appendCustomStats(OperationContext* txn,
+ virtual void appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const;
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
+ virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const;
- virtual void increaseStorageSize(OperationContext* txn, int size, bool enforceQuota);
+ virtual void increaseStorageSize(OperationContext* opCtx, int size, bool enforceQuota);
- virtual int64_t storageSize(OperationContext* txn,
+ virtual int64_t storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo = NULL,
int infoLevel = 0) const;
- virtual long long dataSize(OperationContext* txn) const {
+ virtual long long dataSize(OperationContext* opCtx) const {
return _data->dataSize;
}
- virtual long long numRecords(OperationContext* txn) const {
+ virtual long long numRecords(OperationContext* opCtx) const {
return _data->records.size();
}
- virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
+ virtual boost::optional<RecordId> oplogStartHack(OperationContext* opCtx,
const RecordId& startingPosition) const;
- void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const override {}
+ void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override {}
- virtual void updateStatsAfterRepair(OperationContext* txn,
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize) {
invariant(_data->records.size() == size_t(numRecords));
@@ -179,8 +179,8 @@ private:
StatusWith<RecordId> extractAndCheckLocForOplog(const char* data, int len) const;
RecordId allocateLoc();
- bool cappedAndNeedDelete(OperationContext* txn) const;
- void cappedDeleteAsNeeded(OperationContext* txn);
+ bool cappedAndNeedDelete(OperationContext* opCtx) const;
+ void cappedDeleteAsNeeded(OperationContext* opCtx);
// TODO figure out a proper solution to metadata
const bool _isCapped;
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
index 9f6e051f140..7ab0d15beb0 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
@@ -96,10 +96,10 @@ KVCollectionCatalogEntry::KVCollectionCatalogEntry(KVEngine* engine,
KVCollectionCatalogEntry::~KVCollectionCatalogEntry() {}
-bool KVCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
+bool KVCollectionCatalogEntry::setIndexIsMultikey(OperationContext* opCtx,
StringData indexName,
const MultikeyPaths& multikeyPaths) {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
@@ -146,45 +146,45 @@ bool KVCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
}
}
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
return true;
}
-void KVCollectionCatalogEntry::setIndexHead(OperationContext* txn,
+void KVCollectionCatalogEntry::setIndexHead(OperationContext* opCtx,
StringData indexName,
const RecordId& newHead) {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
md.indexes[offset].head = newHead;
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
}
-Status KVCollectionCatalogEntry::removeIndex(OperationContext* txn, StringData indexName) {
- MetaData md = _getMetaData(txn);
+Status KVCollectionCatalogEntry::removeIndex(OperationContext* opCtx, StringData indexName) {
+ MetaData md = _getMetaData(opCtx);
if (md.findIndexOffset(indexName) < 0)
return Status::OK(); // never had the index so nothing to do.
- const string ident = _catalog->getIndexIdent(txn, ns().ns(), indexName);
+ const string ident = _catalog->getIndexIdent(opCtx, ns().ns(), indexName);
md.eraseIndex(indexName);
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
// Lazily remove to isolate underlying engine from rollback.
- txn->recoveryUnit()->registerChange(new RemoveIndexChange(txn, this, ident));
+ opCtx->recoveryUnit()->registerChange(new RemoveIndexChange(opCtx, this, ident));
return Status::OK();
}
-Status KVCollectionCatalogEntry::prepareForIndexBuild(OperationContext* txn,
+Status KVCollectionCatalogEntry::prepareForIndexBuild(OperationContext* opCtx,
const IndexDescriptor* spec) {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
IndexMetaData imd(spec->infoObj(), false, RecordId(), false);
if (indexTypeSupportsPathLevelMultikeyTracking(spec->getAccessMethodName())) {
const auto feature =
KVCatalog::FeatureTracker::RepairableFeature::kPathLevelMultikeyTracking;
- if (!_catalog->getFeatureTracker()->isRepairableFeatureInUse(txn, feature)) {
- _catalog->getFeatureTracker()->markRepairableFeatureAsInUse(txn, feature);
+ if (!_catalog->getFeatureTracker()->isRepairableFeatureInUse(opCtx, feature)) {
+ _catalog->getFeatureTracker()->markRepairableFeatureAsInUse(opCtx, feature);
}
imd.multikeyPaths = MultikeyPaths{static_cast<size_t>(spec->keyPattern().nFields())};
}
@@ -192,62 +192,62 @@ Status KVCollectionCatalogEntry::prepareForIndexBuild(OperationContext* txn,
// Mark collation feature as in use if the index has a non-simple collation.
if (imd.spec["collation"]) {
const auto feature = KVCatalog::FeatureTracker::NonRepairableFeature::kCollation;
- if (!_catalog->getFeatureTracker()->isNonRepairableFeatureInUse(txn, feature)) {
- _catalog->getFeatureTracker()->markNonRepairableFeatureAsInUse(txn, feature);
+ if (!_catalog->getFeatureTracker()->isNonRepairableFeatureInUse(opCtx, feature)) {
+ _catalog->getFeatureTracker()->markNonRepairableFeatureAsInUse(opCtx, feature);
}
}
md.indexes.push_back(imd);
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
- string ident = _catalog->getIndexIdent(txn, ns().ns(), spec->indexName());
+ string ident = _catalog->getIndexIdent(opCtx, ns().ns(), spec->indexName());
- const Status status = _engine->createSortedDataInterface(txn, ident, spec);
+ const Status status = _engine->createSortedDataInterface(opCtx, ident, spec);
if (status.isOK()) {
- txn->recoveryUnit()->registerChange(new AddIndexChange(txn, this, ident));
+ opCtx->recoveryUnit()->registerChange(new AddIndexChange(opCtx, this, ident));
}
return status;
}
-void KVCollectionCatalogEntry::indexBuildSuccess(OperationContext* txn, StringData indexName) {
- MetaData md = _getMetaData(txn);
+void KVCollectionCatalogEntry::indexBuildSuccess(OperationContext* opCtx, StringData indexName) {
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(indexName);
invariant(offset >= 0);
md.indexes[offset].ready = true;
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
}
-void KVCollectionCatalogEntry::updateTTLSetting(OperationContext* txn,
+void KVCollectionCatalogEntry::updateTTLSetting(OperationContext* opCtx,
StringData idxName,
long long newExpireSeconds) {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
int offset = md.findIndexOffset(idxName);
invariant(offset >= 0);
md.indexes[offset].updateTTLSetting(newExpireSeconds);
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
}
-void KVCollectionCatalogEntry::updateFlags(OperationContext* txn, int newValue) {
- MetaData md = _getMetaData(txn);
+void KVCollectionCatalogEntry::updateFlags(OperationContext* opCtx, int newValue) {
+ MetaData md = _getMetaData(opCtx);
md.options.flags = newValue;
md.options.flagsSet = true;
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
}
-void KVCollectionCatalogEntry::updateValidator(OperationContext* txn,
+void KVCollectionCatalogEntry::updateValidator(OperationContext* opCtx,
const BSONObj& validator,
StringData validationLevel,
StringData validationAction) {
- MetaData md = _getMetaData(txn);
+ MetaData md = _getMetaData(opCtx);
md.options.validator = validator;
md.options.validationLevel = validationLevel.toString();
md.options.validationAction = validationAction.toString();
- _catalog->putMetaData(txn, ns().toString(), md);
+ _catalog->putMetaData(opCtx, ns().toString(), md);
}
BSONCollectionCatalogEntry::MetaData KVCollectionCatalogEntry::_getMetaData(
- OperationContext* txn) const {
- return _catalog->getMetaData(txn, ns().toString());
+ OperationContext* opCtx) const {
+ return _catalog->getMetaData(opCtx, ns().toString());
}
}
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.h b/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
index 430e12a3ae2..c999bee1fe5 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
@@ -55,25 +55,25 @@ public:
return 64;
};
- bool setIndexIsMultikey(OperationContext* txn,
+ bool setIndexIsMultikey(OperationContext* opCtx,
StringData indexName,
const MultikeyPaths& multikeyPaths) final;
- void setIndexHead(OperationContext* txn, StringData indexName, const RecordId& newHead) final;
+ void setIndexHead(OperationContext* opCtx, StringData indexName, const RecordId& newHead) final;
- Status removeIndex(OperationContext* txn, StringData indexName) final;
+ Status removeIndex(OperationContext* opCtx, StringData indexName) final;
- Status prepareForIndexBuild(OperationContext* txn, const IndexDescriptor* spec) final;
+ Status prepareForIndexBuild(OperationContext* opCtx, const IndexDescriptor* spec) final;
- void indexBuildSuccess(OperationContext* txn, StringData indexName) final;
+ void indexBuildSuccess(OperationContext* opCtx, StringData indexName) final;
- void updateTTLSetting(OperationContext* txn,
+ void updateTTLSetting(OperationContext* opCtx,
StringData idxName,
long long newExpireSeconds) final;
- void updateFlags(OperationContext* txn, int newValue) final;
+ void updateFlags(OperationContext* opCtx, int newValue) final;
- void updateValidator(OperationContext* txn,
+ void updateValidator(OperationContext* opCtx,
const BSONObj& validator,
StringData validationLevel,
StringData validationAction) final;
@@ -86,7 +86,7 @@ public:
}
protected:
- MetaData _getMetaData(OperationContext* txn) const final;
+ MetaData _getMetaData(OperationContext* opCtx) const final;
private:
class AddIndexChange;
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry.cpp
index f3b5d8579e6..a0dfc0e7559 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry.cpp
@@ -52,7 +52,7 @@ auto mongo::defaultDatabaseCatalogEntryFactory(const StringData name, KVStorageE
namespace mongo {
-IndexAccessMethod* KVDatabaseCatalogEntry::getIndex(OperationContext* txn,
+IndexAccessMethod* KVDatabaseCatalogEntry::getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* index) {
IndexDescriptor* desc = index->descriptor();
@@ -60,9 +60,9 @@ IndexAccessMethod* KVDatabaseCatalogEntry::getIndex(OperationContext* txn,
const std::string& type = desc->getAccessMethodName();
std::string ident =
- _engine->getCatalog()->getIndexIdent(txn, collection->ns().ns(), desc->indexName());
+ _engine->getCatalog()->getIndexIdent(opCtx, collection->ns().ns(), desc->indexName());
- SortedDataInterface* sdi = _engine->getEngine()->getSortedDataInterface(txn, ident, desc);
+ SortedDataInterface* sdi = _engine->getEngine()->getSortedDataInterface(opCtx, ident, desc);
if ("" == type)
return new BtreeAccessMethod(index, sdi);
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry.h b/src/mongo/db/storage/kv/kv_database_catalog_entry.h
index eb2c9ddb11c..3fe64a3da11 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry.h
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry.h
@@ -36,7 +36,7 @@ class KVDatabaseCatalogEntry : public KVDatabaseCatalogEntryBase {
public:
using KVDatabaseCatalogEntryBase::KVDatabaseCatalogEntryBase;
- IndexAccessMethod* getIndex(OperationContext* txn,
+ IndexAccessMethod* getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* index) final;
};
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
index 717fae44ca9..76031197e80 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
@@ -191,11 +191,11 @@ RecordStore* KVDatabaseCatalogEntryBase::getRecordStore(StringData ns) const {
return it->second->getRecordStore();
}
-Status KVDatabaseCatalogEntryBase::createCollection(OperationContext* txn,
+Status KVDatabaseCatalogEntryBase::createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options,
bool allocateDefaultSpace) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
if (ns.empty()) {
return Status(ErrorCodes::BadValue, "Collection namespace cannot be empty");
@@ -207,28 +207,29 @@ Status KVDatabaseCatalogEntryBase::createCollection(OperationContext* txn,
}
// need to create it
- Status status = _engine->getCatalog()->newCollection(txn, ns, options);
+ Status status = _engine->getCatalog()->newCollection(opCtx, ns, options);
if (!status.isOK())
return status;
string ident = _engine->getCatalog()->getCollectionIdent(ns);
- status = _engine->getEngine()->createRecordStore(txn, ns, ident, options);
+ status = _engine->getEngine()->createRecordStore(opCtx, ns, ident, options);
if (!status.isOK())
return status;
// Mark collation feature as in use if the collection has a non-simple default collation.
if (!options.collation.isEmpty()) {
const auto feature = KVCatalog::FeatureTracker::NonRepairableFeature::kCollation;
- if (_engine->getCatalog()->getFeatureTracker()->isNonRepairableFeatureInUse(txn, feature)) {
- _engine->getCatalog()->getFeatureTracker()->markNonRepairableFeatureAsInUse(txn,
+ if (_engine->getCatalog()->getFeatureTracker()->isNonRepairableFeatureInUse(opCtx,
+ feature)) {
+ _engine->getCatalog()->getFeatureTracker()->markNonRepairableFeatureAsInUse(opCtx,
feature);
}
}
- txn->recoveryUnit()->registerChange(new AddCollectionChange(txn, this, ns, ident, true));
+ opCtx->recoveryUnit()->registerChange(new AddCollectionChange(opCtx, this, ns, ident, true));
- auto rs = _engine->getEngine()->getRecordStore(txn, ns, ident, options);
+ auto rs = _engine->getEngine()->getRecordStore(opCtx, ns, ident, options);
invariant(rs);
_collections[ns.toString()] = new KVCollectionCatalogEntry(
@@ -272,11 +273,11 @@ void KVDatabaseCatalogEntryBase::reinitCollectionAfterRepair(OperationContext* o
initCollection(opCtx, ns, false);
}
-Status KVDatabaseCatalogEntryBase::renameCollection(OperationContext* txn,
+Status KVDatabaseCatalogEntryBase::renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
RecordStore* originalRS = NULL;
@@ -294,11 +295,11 @@ Status KVDatabaseCatalogEntryBase::renameCollection(OperationContext* txn,
const std::string identFrom = _engine->getCatalog()->getCollectionIdent(fromNS);
- Status status = _engine->getEngine()->okToRename(txn, fromNS, toNS, identFrom, originalRS);
+ Status status = _engine->getEngine()->okToRename(opCtx, fromNS, toNS, identFrom, originalRS);
if (!status.isOK())
return status;
- status = _engine->getCatalog()->renameCollection(txn, fromNS, toNS, stayTemp);
+ status = _engine->getCatalog()->renameCollection(opCtx, fromNS, toNS, stayTemp);
if (!status.isOK())
return status;
@@ -306,17 +307,18 @@ Status KVDatabaseCatalogEntryBase::renameCollection(OperationContext* txn,
invariant(identFrom == identTo);
- BSONCollectionCatalogEntry::MetaData md = _engine->getCatalog()->getMetaData(txn, toNS);
+ BSONCollectionCatalogEntry::MetaData md = _engine->getCatalog()->getMetaData(opCtx, toNS);
const CollectionMap::iterator itFrom = _collections.find(fromNS.toString());
invariant(itFrom != _collections.end());
- txn->recoveryUnit()->registerChange(
- new RemoveCollectionChange(txn, this, fromNS, identFrom, itFrom->second, false));
+ opCtx->recoveryUnit()->registerChange(
+ new RemoveCollectionChange(opCtx, this, fromNS, identFrom, itFrom->second, false));
_collections.erase(itFrom);
- txn->recoveryUnit()->registerChange(new AddCollectionChange(txn, this, toNS, identTo, false));
+ opCtx->recoveryUnit()->registerChange(
+ new AddCollectionChange(opCtx, this, toNS, identTo, false));
- auto rs = _engine->getEngine()->getRecordStore(txn, toNS, identTo, md.options);
+ auto rs = _engine->getEngine()->getRecordStore(opCtx, toNS, identTo, md.options);
_collections[toNS.toString()] = new KVCollectionCatalogEntry(
_engine->getEngine(), _engine->getCatalog(), toNS, identTo, std::move(rs));
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.h b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.h
index 4d3d50a20e4..bd0e4ec3927 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.h
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.h
@@ -67,16 +67,16 @@ public:
RecordStore* getRecordStore(StringData ns) const override;
- IndexAccessMethod* getIndex(OperationContext* txn,
+ IndexAccessMethod* getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* index) override = 0;
- Status createCollection(OperationContext* txn,
+ Status createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options,
bool allocateDefaultSpace) override;
- Status renameCollection(OperationContext* txn,
+ Status renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp) override;
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.cpp
index 422b36ed58a..dc9da47492b 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.cpp
@@ -42,6 +42,6 @@ std::unique_ptr<mongo::KVDatabaseCatalogEntryMock> mongo::kvDatabaseCatalogEntry
// Used to satisfy link dependencies in unit test - not invoked.
mongo::IndexAccessMethod* mongo::KVDatabaseCatalogEntryMock::getIndex(
- OperationContext* txn, const CollectionCatalogEntry* collection, IndexCatalogEntry* index) {
+ OperationContext* opCtx, const CollectionCatalogEntry* collection, IndexCatalogEntry* index) {
invariant(false);
}
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.h b/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.h
index 54def3f9b5a..840800dfaa4 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.h
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_mock.h
@@ -39,7 +39,7 @@ class KVDatabaseCatalogEntryMock : public KVDatabaseCatalogEntryBase {
public:
using KVDatabaseCatalogEntryBase::KVDatabaseCatalogEntryBase;
- IndexAccessMethod* getIndex(OperationContext* txn,
+ IndexAccessMethod* getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* index) final;
};
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index 04c5acb1dfa..f76ef68bb0e 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -91,14 +91,14 @@ public:
virtual Status dropIdent(OperationContext* opCtx, StringData ident) = 0;
// optional
- virtual int flushAllFiles(OperationContext* txn, bool sync) {
+ virtual int flushAllFiles(OperationContext* opCtx, bool sync) {
return 0;
}
/**
* See StorageEngine::beginBackup for details
*/
- virtual Status beginBackup(OperationContext* txn) {
+ virtual Status beginBackup(OperationContext* opCtx) {
return Status(ErrorCodes::CommandNotSupported,
"The current storage engine doesn't support backup mode");
}
@@ -106,7 +106,7 @@ public:
/**
* See StorageEngine::endBackup for details
*/
- virtual void endBackup(OperationContext* txn) {
+ virtual void endBackup(OperationContext* opCtx) {
MONGO_UNREACHABLE;
}
diff --git a/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp b/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp
index ba97e3af5f8..11cad9890d1 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp
@@ -52,36 +52,37 @@ public:
public:
Operation() = default;
Operation(ServiceContext::UniqueClient client, RecoveryUnit* ru)
- : _client(std::move(client)), _txn(_client->makeOperationContext()) {
- delete _txn->releaseRecoveryUnit();
- _txn->setRecoveryUnit(ru, OperationContext::kNotInUnitOfWork);
+ : _client(std::move(client)), _opCtx(_client->makeOperationContext()) {
+ delete _opCtx->releaseRecoveryUnit();
+ _opCtx->setRecoveryUnit(ru, OperationContext::kNotInUnitOfWork);
}
Operation(Operation&& other) = default;
Operation& operator=(Operation&& other) {
- // Need to assign to _txn first if active. Otherwise we'd destroy _client before _txn.
- _txn = std::move(other._txn);
+ // Need to assign to _opCtx first if active. Otherwise we'd destroy _client before
+ // _opCtx.
+ _opCtx = std::move(other._opCtx);
_client = std::move(other._client);
return *this;
}
OperationContext& operator*() const {
- return *_txn;
+ return *_opCtx;
}
OperationContext* operator->() const {
- return _txn.get();
+ return _opCtx.get();
}
operator OperationContext*() const {
- return _txn.get();
+ return _opCtx.get();
}
private:
ServiceContext::UniqueClient _client;
- ServiceContext::UniqueOperationContext _txn;
+ ServiceContext::UniqueOperationContext _opCtx;
};
Operation makeOperation() {
@@ -104,8 +105,8 @@ public:
return createSnapshot();
}
- RecordId insertRecord(OperationContext* txn, std::string contents = "abcd") {
- auto id = rs->insertRecord(txn, contents.c_str(), contents.length() + 1, false);
+ RecordId insertRecord(OperationContext* opCtx, std::string contents = "abcd") {
+ auto id = rs->insertRecord(opCtx, contents.c_str(), contents.length() + 1, false);
ASSERT_OK(id);
return id.getValue();
}
@@ -136,8 +137,8 @@ public:
/**
* Returns the number of records seen iterating rs using the passed-in OperationContext.
*/
- int itCountOn(OperationContext* txn) {
- auto cursor = rs->getCursor(txn);
+ int itCountOn(OperationContext* opCtx) {
+ auto cursor = rs->getCursor(opCtx);
int count = 0;
while (auto record = cursor->next()) {
count++;
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index ee02c447bdf..e74a60353a4 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -208,12 +208,12 @@ KVDatabaseCatalogEntryBase* KVStorageEngine::getDatabaseCatalogEntry(OperationCo
return db;
}
-Status KVStorageEngine::closeDatabase(OperationContext* txn, StringData db) {
+Status KVStorageEngine::closeDatabase(OperationContext* opCtx, StringData db) {
// This is ok to be a no-op as there is no database layer in kv.
return Status::OK();
}
-Status KVStorageEngine::dropDatabase(OperationContext* txn, StringData db) {
+Status KVStorageEngine::dropDatabase(OperationContext* opCtx, StringData db) {
KVDatabaseCatalogEntryBase* entry;
{
stdx::lock_guard<stdx::mutex> lk(_dbsLock);
@@ -228,14 +228,14 @@ Status KVStorageEngine::dropDatabase(OperationContext* txn, StringData db) {
// wherever possible. Eventually we want to move this up so that it can include the logOp
// inside of the WUOW, but that would require making DB dropping happen inside the Dur
// system for MMAPv1.
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
std::list<std::string> toDrop;
entry->getCollectionNamespaces(&toDrop);
for (std::list<std::string>::iterator it = toDrop.begin(); it != toDrop.end(); ++it) {
string coll = *it;
- entry->dropCollection(txn, coll);
+ entry->dropCollection(opCtx, coll);
}
toDrop.clear();
entry->getCollectionNamespaces(&toDrop);
@@ -243,7 +243,7 @@ Status KVStorageEngine::dropDatabase(OperationContext* txn, StringData db) {
{
stdx::lock_guard<stdx::mutex> lk(_dbsLock);
- txn->recoveryUnit()->registerChange(new RemoveDBChange(this, db, entry));
+ opCtx->recoveryUnit()->registerChange(new RemoveDBChange(this, db, entry));
_dbs.erase(db.toString());
}
@@ -251,24 +251,24 @@ Status KVStorageEngine::dropDatabase(OperationContext* txn, StringData db) {
return Status::OK();
}
-int KVStorageEngine::flushAllFiles(OperationContext* txn, bool sync) {
- return _engine->flushAllFiles(txn, sync);
+int KVStorageEngine::flushAllFiles(OperationContext* opCtx, bool sync) {
+ return _engine->flushAllFiles(opCtx, sync);
}
-Status KVStorageEngine::beginBackup(OperationContext* txn) {
+Status KVStorageEngine::beginBackup(OperationContext* opCtx) {
// We should not proceed if we are already in backup mode
if (_inBackupMode)
return Status(ErrorCodes::BadValue, "Already in Backup Mode");
- Status status = _engine->beginBackup(txn);
+ Status status = _engine->beginBackup(opCtx);
if (status.isOK())
_inBackupMode = true;
return status;
}
-void KVStorageEngine::endBackup(OperationContext* txn) {
+void KVStorageEngine::endBackup(OperationContext* opCtx) {
// We should never reach here if we aren't already in backup mode
invariant(_inBackupMode);
- _engine->endBackup(txn);
+ _engine->endBackup(opCtx);
_inBackupMode = false;
}
@@ -284,12 +284,12 @@ SnapshotManager* KVStorageEngine::getSnapshotManager() const {
return _engine->getSnapshotManager();
}
-Status KVStorageEngine::repairRecordStore(OperationContext* txn, const std::string& ns) {
- Status status = _engine->repairIdent(txn, _catalog->getCollectionIdent(ns));
+Status KVStorageEngine::repairRecordStore(OperationContext* opCtx, const std::string& ns) {
+ Status status = _engine->repairIdent(opCtx, _catalog->getCollectionIdent(ns));
if (!status.isOK())
return status;
- _dbs[nsToDatabase(ns)]->reinitCollectionAfterRepair(txn, ns);
+ _dbs[nsToDatabase(ns)]->reinitCollectionAfterRepair(opCtx, ns);
return Status::OK();
}
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.h b/src/mongo/db/storage/kv/kv_storage_engine.h
index ba656ae85c1..800f698b34a 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.h
+++ b/src/mongo/db/storage/kv/kv_storage_engine.h
@@ -89,21 +89,21 @@ public:
return _supportsDocLocking;
}
- virtual Status closeDatabase(OperationContext* txn, StringData db);
+ virtual Status closeDatabase(OperationContext* opCtx, StringData db);
- virtual Status dropDatabase(OperationContext* txn, StringData db);
+ virtual Status dropDatabase(OperationContext* opCtx, StringData db);
- virtual int flushAllFiles(OperationContext* txn, bool sync);
+ virtual int flushAllFiles(OperationContext* opCtx, bool sync);
- virtual Status beginBackup(OperationContext* txn);
+ virtual Status beginBackup(OperationContext* opCtx);
- virtual void endBackup(OperationContext* txn);
+ virtual void endBackup(OperationContext* opCtx);
virtual bool isDurable() const;
virtual bool isEphemeral() const;
- virtual Status repairRecordStore(OperationContext* txn, const std::string& ns);
+ virtual Status repairRecordStore(OperationContext* opCtx, const std::string& ns);
virtual void cleanShutdown();
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
index c2e8d710062..14a3e57503b 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
@@ -81,57 +81,57 @@ public:
virtual ~BtreeInterfaceImpl() {}
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) {
- return new BtreeBuilderInterfaceImpl<OnDiskFormat>(txn,
- _btree->newBuilder(txn, dupsAllowed));
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) {
+ return new BtreeBuilderInterfaceImpl<OnDiskFormat>(opCtx,
+ _btree->newBuilder(opCtx, dupsAllowed));
}
- virtual Status insert(OperationContext* txn,
+ virtual Status insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
- return _btree->insert(txn, key, DiskLoc::fromRecordId(loc), dupsAllowed);
+ return _btree->insert(opCtx, key, DiskLoc::fromRecordId(loc), dupsAllowed);
}
- virtual void unindex(OperationContext* txn,
+ virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) {
- _btree->unindex(txn, key, DiskLoc::fromRecordId(loc));
+ _btree->unindex(opCtx, key, DiskLoc::fromRecordId(loc));
}
- virtual void fullValidate(OperationContext* txn,
+ virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const {
- *numKeysOut = _btree->fullValidate(txn, NULL, false, false, 0);
+ *numKeysOut = _btree->fullValidate(opCtx, NULL, false, false, 0);
}
- virtual bool appendCustomStats(OperationContext* txn,
+ virtual bool appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const {
return false;
}
- virtual long long getSpaceUsedBytes(OperationContext* txn) const {
- return _btree->getRecordStore()->dataSize(txn);
+ virtual long long getSpaceUsedBytes(OperationContext* opCtx) const {
+ return _btree->getRecordStore()->dataSize(opCtx);
}
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
- return _btree->dupKeyCheck(txn, key, DiskLoc::fromRecordId(loc));
+ virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key, const RecordId& loc) {
+ return _btree->dupKeyCheck(opCtx, key, DiskLoc::fromRecordId(loc));
}
- virtual bool isEmpty(OperationContext* txn) {
- return _btree->isEmpty(txn);
+ virtual bool isEmpty(OperationContext* opCtx) {
+ return _btree->isEmpty(opCtx);
}
- virtual Status touch(OperationContext* txn) const {
- return _btree->touch(txn);
+ virtual Status touch(OperationContext* opCtx) const {
+ return _btree->touch(opCtx);
}
class Cursor final : public SortedDataInterface::Cursor {
public:
- Cursor(OperationContext* txn, const BtreeLogic<OnDiskFormat>* btree, bool forward)
- : _txn(txn), _btree(btree), _direction(forward ? 1 : -1), _ofs(0) {}
+ Cursor(OperationContext* opCtx, const BtreeLogic<OnDiskFormat>* btree, bool forward)
+ : _opCtx(opCtx), _btree(btree), _direction(forward ? 1 : -1), _ofs(0) {}
boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
if (isEOF())
@@ -140,7 +140,7 @@ public:
// Return current position rather than advancing.
_lastMoveWasRestore = false;
} else {
- _btree->advance(_txn, &_bucket, &_ofs, _direction);
+ _btree->advance(_opCtx, &_bucket, &_ofs, _direction);
}
if (atEndPoint())
@@ -186,12 +186,12 @@ public:
if (canUseAdvanceTo) {
// This takes advantage of current location.
- _btree->advanceTo(_txn, &_bucket, &_ofs, seekPoint, _direction);
+ _btree->advanceTo(_opCtx, &_bucket, &_ofs, seekPoint, _direction);
} else {
// Start at root.
- _bucket = _btree->getHead(_txn);
+ _bucket = _btree->getHead(_opCtx);
_ofs = 0;
- _btree->customLocate(_txn, &_bucket, &_ofs, seekPoint, _direction);
+ _btree->customLocate(_opCtx, &_bucket, &_ofs, seekPoint, _direction);
}
_lastMoveWasRestore = false;
@@ -239,7 +239,8 @@ public:
if (_btree->savedCursors()->unregisterCursor(&_saved)) {
// We can use the fast restore mechanism.
- _btree->restorePosition(_txn, _saved.key, _saved.loc, _direction, &_bucket, &_ofs);
+ _btree->restorePosition(
+ _opCtx, _saved.key, _saved.loc, _direction, &_bucket, &_ofs);
} else {
// Need to find our position from the root.
locate(_saved.key, _saved.loc.toRecordId());
@@ -251,11 +252,11 @@ public:
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
private:
@@ -288,7 +289,7 @@ public:
}
void locate(const BSONObj& key, const RecordId& loc) {
- _btree->locate(_txn, key, DiskLoc::fromRecordId(loc), _direction, &_ofs, &_bucket);
+ _btree->locate(_opCtx, key, DiskLoc::fromRecordId(loc), _direction, &_ofs, &_bucket);
if (atOrPastEndPointAfterSeeking())
markEOF();
}
@@ -301,16 +302,16 @@ public:
}
BSONObj getKey() const {
- return _btree->getKey(_txn, _bucket, _ofs);
+ return _btree->getKey(_opCtx, _bucket, _ofs);
}
DiskLoc getDiskLoc() const {
- return _btree->getDiskLoc(_txn, _bucket, _ofs);
+ return _btree->getDiskLoc(_opCtx, _bucket, _ofs);
}
void seekEndCursor() {
if (!_endState)
return;
- _btree->locate(_txn,
+ _btree->locate(_opCtx,
_endState->key,
forward() == _endState->inclusive ? DiskLoc::max() : DiskLoc::min(),
_direction,
@@ -322,7 +323,7 @@ public:
return _direction == 1;
}
- OperationContext* _txn; // not owned
+ OperationContext* _opCtx; // not owned
const BtreeLogic<OnDiskFormat>* const _btree;
const int _direction;
@@ -347,29 +348,29 @@ public:
SavedCursorRegistry::SavedCursor _saved;
};
- virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool isForward = true) const {
- return stdx::make_unique<Cursor>(txn, _btree.get(), isForward);
+ return stdx::make_unique<Cursor>(opCtx, _btree.get(), isForward);
}
class RandomCursor final : public SortedDataInterface::Cursor {
public:
- RandomCursor(OperationContext* txn, const BtreeLogic<OnDiskFormat>* btree)
- : _txn(txn), _btree(btree) {}
+ RandomCursor(OperationContext* opCtx, const BtreeLogic<OnDiskFormat>* btree)
+ : _opCtx(opCtx), _btree(btree) {}
boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
- if (_btree->isEmpty(_txn)) {
+ if (_btree->isEmpty(_opCtx)) {
return {};
}
- return _btree->getRandomEntry(_txn);
+ return _btree->getRandomEntry(_opCtx);
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
//
@@ -396,17 +397,17 @@ public:
void restore() override {}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
const BtreeLogic<OnDiskFormat>* const _btree;
};
virtual std::unique_ptr<SortedDataInterface::Cursor> newRandomCursor(
- OperationContext* txn) const {
- return stdx::make_unique<RandomCursor>(txn, _btree.get());
+ OperationContext* opCtx) const {
+ return stdx::make_unique<RandomCursor>(opCtx, _btree.get());
}
- virtual Status initAsEmpty(OperationContext* txn) {
- return _btree->initAsEmpty(txn);
+ virtual Status initAsEmpty(OperationContext* opCtx) {
+ return _btree->initAsEmpty(opCtx);
}
private:
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
index f6702fe27aa..de030d13cdc 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
@@ -80,22 +80,22 @@ std::once_flag assertValidFlag;
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::Builder* BtreeLogic<BtreeLayout>::newBuilder(
- OperationContext* txn, bool dupsAllowed) {
- return new Builder(this, txn, dupsAllowed);
+ OperationContext* opCtx, bool dupsAllowed) {
+ return new Builder(this, opCtx, dupsAllowed);
}
template <class BtreeLayout>
BtreeLogic<BtreeLayout>::Builder::Builder(BtreeLogic* logic,
- OperationContext* txn,
+ OperationContext* opCtx,
bool dupsAllowed)
- : _logic(logic), _dupsAllowed(dupsAllowed), _txn(txn) {
+ : _logic(logic), _dupsAllowed(dupsAllowed), _opCtx(opCtx) {
// The normal bulk building path calls initAsEmpty, so we already have an empty root bucket.
// This isn't the case in some unit tests that use the Builder directly rather than going
// through an IndexAccessMethod.
- _rightLeafLoc = DiskLoc::fromRecordId(_logic->_headManager->getHead(txn));
+ _rightLeafLoc = DiskLoc::fromRecordId(_logic->_headManager->getHead(opCtx));
if (_rightLeafLoc.isNull()) {
- _rightLeafLoc = _logic->_addBucket(txn);
- _logic->_headManager->setHead(_txn, _rightLeafLoc.toRecordId());
+ _rightLeafLoc = _logic->_addBucket(opCtx);
+ _logic->_headManager->setHead(_opCtx, _rightLeafLoc.toRecordId());
}
// must be empty when starting
@@ -146,7 +146,7 @@ Status BtreeLogic<BtreeLayout>::Builder::addKey(const BSONObj& keyObj, const Dis
BucketType* rightLeaf = _getModifiableBucket(_rightLeafLoc);
if (!_logic->pushBack(rightLeaf, loc, *key, DiskLoc())) {
// bucket was full, so split and try with the new node.
- _txn->recoveryUnit()->registerChange(new SetRightLeafLocChange(this, _rightLeafLoc));
+ _opCtx->recoveryUnit()->registerChange(new SetRightLeafLocChange(this, _rightLeafLoc));
_rightLeafLoc = newBucket(rightLeaf, _rightLeafLoc);
rightLeaf = _getModifiableBucket(_rightLeafLoc);
invariant(_logic->pushBack(rightLeaf, loc, *key, DiskLoc()));
@@ -166,14 +166,14 @@ DiskLoc BtreeLogic<BtreeLayout>::Builder::newBucket(BucketType* leftSib, DiskLoc
if (leftSib->parent.isNull()) {
// Making a new root
- invariant(leftSibLoc.toRecordId() == _logic->_headManager->getHead(_txn));
- const DiskLoc newRootLoc = _logic->_addBucket(_txn);
+ invariant(leftSibLoc.toRecordId() == _logic->_headManager->getHead(_opCtx));
+ const DiskLoc newRootLoc = _logic->_addBucket(_opCtx);
leftSib->parent = newRootLoc;
- _logic->_headManager->setHead(_txn, newRootLoc.toRecordId());
+ _logic->_headManager->setHead(_opCtx, newRootLoc.toRecordId());
// Set the newRoot's nextChild to point to leftSib for the invariant below.
BucketType* newRoot = _getBucket(newRootLoc);
- *_txn->recoveryUnit()->writing(&newRoot->nextChild) = leftSibLoc;
+ *_opCtx->recoveryUnit()->writing(&newRoot->nextChild) = leftSibLoc;
}
DiskLoc parentLoc = leftSib->parent;
@@ -198,23 +198,23 @@ DiskLoc BtreeLogic<BtreeLayout>::Builder::newBucket(BucketType* leftSib, DiskLoc
// Create a new bucket to the right of leftSib and set its parent pointer and the downward
// nextChild pointer from the parent.
- DiskLoc newBucketLoc = _logic->_addBucket(_txn);
+ DiskLoc newBucketLoc = _logic->_addBucket(_opCtx);
BucketType* newBucket = _getBucket(newBucketLoc);
- *_txn->recoveryUnit()->writing(&newBucket->parent) = parentLoc;
- *_txn->recoveryUnit()->writing(&parent->nextChild) = newBucketLoc;
+ *_opCtx->recoveryUnit()->writing(&newBucket->parent) = parentLoc;
+ *_opCtx->recoveryUnit()->writing(&parent->nextChild) = newBucketLoc;
return newBucketLoc;
}
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType*
BtreeLogic<BtreeLayout>::Builder::_getModifiableBucket(DiskLoc loc) {
- return _logic->btreemod(_txn, _logic->getBucket(_txn, loc));
+ return _logic->btreemod(_opCtx, _logic->getBucket(_opCtx, loc));
}
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::Builder::_getBucket(
DiskLoc loc) {
- return _logic->getBucket(_txn, loc);
+ return _logic->getBucket(_opCtx, loc);
}
//
@@ -261,8 +261,8 @@ char* BtreeLogic<BtreeLayout>::dataAt(BucketType* bucket, short ofs) {
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::btreemod(
- OperationContext* txn, BucketType* bucket) {
- txn->recoveryUnit()->writingPtr(bucket, BtreeLayout::BucketSize);
+ OperationContext* opCtx, BucketType* bucket) {
+ opCtx->recoveryUnit()->writingPtr(bucket, BtreeLayout::BucketSize);
return bucket;
}
@@ -433,7 +433,7 @@ bool BtreeLogic<BtreeLayout>::pushBack(BucketType* bucket,
* Returns false if a split is required.
*/
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int& keypos,
@@ -444,13 +444,13 @@ bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
int bytesNeeded = key.dataSize() + sizeof(KeyHeaderType);
if (bytesNeeded > bucket->emptySize) {
- _pack(txn, bucket, bucketLoc, keypos);
+ _pack(opCtx, bucket, bucketLoc, keypos);
if (bytesNeeded > bucket->emptySize) {
return false;
}
}
- invariant(getBucket(txn, bucketLoc) == bucket);
+ invariant(getBucket(opCtx, bucketLoc) == bucket);
{
// declare that we will write to [k(keypos),k(n)]
@@ -458,7 +458,7 @@ bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
char* end = reinterpret_cast<char*>(&getKeyHeader(bucket, bucket->n + 1));
// Declare that we will write to [k(keypos),k(n)]
- txn->recoveryUnit()->writingPtr(start, end - start);
+ opCtx->recoveryUnit()->writingPtr(start, end - start);
}
// e.g. for n==3, keypos==2
@@ -468,7 +468,7 @@ bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
}
size_t writeLen = sizeof(bucket->emptySize) + sizeof(bucket->topSize) + sizeof(bucket->n);
- txn->recoveryUnit()->writingPtr(&bucket->emptySize, writeLen);
+ opCtx->recoveryUnit()->writingPtr(&bucket->emptySize, writeLen);
bucket->emptySize -= sizeof(KeyHeaderType);
bucket->n++;
@@ -478,7 +478,7 @@ bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
kn.recordLoc = recordLoc;
kn.setKeyDataOfs((short)_alloc(bucket, key.dataSize()));
char* p = dataAt(bucket, kn.keyDataOfs());
- txn->recoveryUnit()->writingPtr(p, key.dataSize());
+ opCtx->recoveryUnit()->writingPtr(p, key.dataSize());
memcpy(p, key.data(), key.dataSize());
return true;
}
@@ -515,17 +515,17 @@ int BtreeLogic<BtreeLayout>::_packedDataSize(BucketType* bucket, int refPos) {
* it.
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::_pack(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::_pack(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc thisLoc,
int& refPos) {
- invariant(getBucket(txn, thisLoc) == bucket);
+ invariant(getBucket(opCtx, thisLoc) == bucket);
if (bucket->flags & Packed) {
return;
}
- _packReadyForMod(btreemod(txn, bucket), refPos);
+ _packReadyForMod(btreemod(opCtx, bucket), refPos);
}
/**
@@ -669,44 +669,44 @@ void BtreeLogic<BtreeLayout>::dropFront(BucketType* bucket, int nDrop, int& refp
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::customLocate(OperationContext* opCtx,
DiskLoc* locInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction) const {
pair<DiskLoc, int> unused;
- customLocate(txn, locInOut, keyOfsInOut, seekPoint, direction, unused);
- skipUnusedKeys(txn, locInOut, keyOfsInOut, direction);
+ customLocate(opCtx, locInOut, keyOfsInOut, seekPoint, direction, unused);
+ skipUnusedKeys(opCtx, locInOut, keyOfsInOut, direction);
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::advance(OperationContext* opCtx,
DiskLoc* bucketLocInOut,
int* posInOut,
int direction) const {
- *bucketLocInOut = advance(txn, *bucketLocInOut, posInOut, direction);
- skipUnusedKeys(txn, bucketLocInOut, posInOut, direction);
+ *bucketLocInOut = advance(opCtx, *bucketLocInOut, posInOut, direction);
+ skipUnusedKeys(opCtx, bucketLocInOut, posInOut, direction);
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::skipUnusedKeys(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::skipUnusedKeys(OperationContext* opCtx,
DiskLoc* loc,
int* pos,
int direction) const {
- while (!loc->isNull() && !keyIsUsed(txn, *loc, *pos)) {
- *loc = advance(txn, *loc, pos, direction);
+ while (!loc->isNull() && !keyIsUsed(opCtx, *loc, *pos)) {
+ *loc = advance(opCtx, *loc, pos, direction);
}
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::advanceTo(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::advanceTo(OperationContext* opCtx,
DiskLoc* thisLocInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction) const {
- advanceToImpl(txn, thisLocInOut, keyOfsInOut, seekPoint, direction);
- skipUnusedKeys(txn, thisLocInOut, keyOfsInOut, direction);
+ advanceToImpl(opCtx, thisLocInOut, keyOfsInOut, seekPoint, direction);
+ skipUnusedKeys(opCtx, thisLocInOut, keyOfsInOut, direction);
}
/**
@@ -719,12 +719,12 @@ void BtreeLogic<BtreeLayout>::advanceTo(OperationContext* txn,
* and reverse implementations would be more efficient
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::advanceToImpl(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::advanceToImpl(OperationContext* opCtx,
DiskLoc* thisLocInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction) const {
- BucketType* bucket = getBucket(txn, *thisLocInOut);
+ BucketType* bucket = getBucket(opCtx, *thisLocInOut);
int l, h;
bool dontGoUp;
@@ -745,14 +745,14 @@ void BtreeLogic<BtreeLayout>::advanceToImpl(OperationContext* txn,
if (dontGoUp) {
// this comparison result assures h > l
- if (!customFind(txn, l, h, seekPoint, direction, thisLocInOut, keyOfsInOut, bestParent)) {
+ if (!customFind(opCtx, l, h, seekPoint, direction, thisLocInOut, keyOfsInOut, bestParent)) {
return;
}
} else {
// go up parents until rightmost/leftmost node is >=/<= target or at top
while (!bucket->parent.isNull()) {
*thisLocInOut = bucket->parent;
- bucket = getBucket(txn, *thisLocInOut);
+ bucket = getBucket(opCtx, *thisLocInOut);
if (direction > 0) {
if (customBSONCmp(getFullKey(bucket, bucket->n - 1).data.toBson(),
@@ -768,17 +768,17 @@ void BtreeLogic<BtreeLayout>::advanceToImpl(OperationContext* txn,
}
}
- customLocate(txn, thisLocInOut, keyOfsInOut, seekPoint, direction, bestParent);
+ customLocate(opCtx, thisLocInOut, keyOfsInOut, seekPoint, direction, bestParent);
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::customLocate(OperationContext* opCtx,
DiskLoc* locInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction,
pair<DiskLoc, int>& bestParent) const {
- BucketType* bucket = getBucket(txn, *locInOut);
+ BucketType* bucket = getBucket(opCtx, *locInOut);
if (0 == bucket->n) {
*locInOut = DiskLoc();
@@ -809,7 +809,7 @@ void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
if (!next.isNull()) {
bestParent = pair<DiskLoc, int>(*locInOut, *keyOfsInOut);
*locInOut = next;
- bucket = getBucket(txn, *locInOut);
+ bucket = getBucket(opCtx, *locInOut);
continue;
} else {
return;
@@ -832,21 +832,21 @@ void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
return;
} else {
*locInOut = next;
- bucket = getBucket(txn, *locInOut);
+ bucket = getBucket(opCtx, *locInOut);
continue;
}
}
- if (!customFind(txn, l, h, seekPoint, direction, locInOut, keyOfsInOut, bestParent)) {
+ if (!customFind(opCtx, l, h, seekPoint, direction, locInOut, keyOfsInOut, bestParent)) {
return;
}
- bucket = getBucket(txn, *locInOut);
+ bucket = getBucket(opCtx, *locInOut);
}
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::customFind(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::customFind(OperationContext* opCtx,
int low,
int high,
const IndexSeekPoint& seekPoint,
@@ -854,7 +854,7 @@ bool BtreeLogic<BtreeLayout>::customFind(OperationContext* txn,
DiskLoc* thisLocInOut,
int* keyOfsInOut,
pair<DiskLoc, int>& bestParent) const {
- const BucketType* bucket = getBucket(txn, *thisLocInOut);
+ const BucketType* bucket = getBucket(opCtx, *thisLocInOut);
for (;;) {
if (low + 1 == high) {
@@ -942,31 +942,31 @@ int BtreeLogic<BtreeLayout>::customBSONCmp(const BSONObj& left,
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::exists(OperationContext* txn, const KeyDataType& key) const {
+bool BtreeLogic<BtreeLayout>::exists(OperationContext* opCtx, const KeyDataType& key) const {
int position = 0;
// Find the DiskLoc
bool found;
- DiskLoc bucket = _locate(txn, getRootLoc(txn), key, &position, &found, DiskLoc::min(), 1);
+ DiskLoc bucket = _locate(opCtx, getRootLoc(opCtx), key, &position, &found, DiskLoc::min(), 1);
while (!bucket.isNull()) {
- FullKey fullKey = getFullKey(getBucket(txn, bucket), position);
+ FullKey fullKey = getFullKey(getBucket(opCtx, bucket), position);
if (fullKey.header.isUsed()) {
return fullKey.data.woEqual(key);
}
- bucket = advance(txn, bucket, &position, 1);
+ bucket = advance(opCtx, bucket, &position, 1);
}
return false;
}
template <class BtreeLayout>
-Status BtreeLogic<BtreeLayout>::dupKeyCheck(OperationContext* txn,
+Status BtreeLogic<BtreeLayout>::dupKeyCheck(OperationContext* opCtx,
const BSONObj& key,
const DiskLoc& loc) const {
KeyDataOwnedType theKey(key);
- if (!wouldCreateDup(txn, theKey, loc)) {
+ if (!wouldCreateDup(opCtx, theKey, loc)) {
return Status::OK();
}
@@ -974,16 +974,16 @@ Status BtreeLogic<BtreeLayout>::dupKeyCheck(OperationContext* txn,
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::wouldCreateDup(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::wouldCreateDup(OperationContext* opCtx,
const KeyDataType& key,
const DiskLoc self) const {
int position;
bool found;
- DiskLoc posLoc = _locate(txn, getRootLoc(txn), key, &position, &found, DiskLoc::min(), 1);
+ DiskLoc posLoc = _locate(opCtx, getRootLoc(opCtx), key, &position, &found, DiskLoc::min(), 1);
while (!posLoc.isNull()) {
- FullKey fullKey = getFullKey(getBucket(txn, posLoc), position);
+ FullKey fullKey = getFullKey(getBucket(opCtx, posLoc), position);
if (fullKey.header.isUsed()) {
// TODO: we may not need fullKey.data until we know fullKey.header.isUsed() here
// and elsewhere.
@@ -993,7 +993,7 @@ bool BtreeLogic<BtreeLayout>::wouldCreateDup(OperationContext* txn,
break;
}
- posLoc = advance(txn, posLoc, &position, 1);
+ posLoc = advance(opCtx, posLoc, &position, 1);
}
return false;
}
@@ -1022,7 +1022,7 @@ string BtreeLogic<BtreeLayout>::dupKeyError(const KeyDataType& key) const {
* note result might be an Unused location!
*/
template <class BtreeLayout>
-Status BtreeLogic<BtreeLayout>::_find(OperationContext* txn,
+Status BtreeLogic<BtreeLayout>::_find(OperationContext* opCtx,
BucketType* bucket,
const KeyDataType& key,
const DiskLoc& recordLoc,
@@ -1056,8 +1056,8 @@ Status BtreeLogic<BtreeLayout>::_find(OperationContext* txn,
// This is expensive and we only want to do it once(? -- when would
// it happen twice).
dupsCheckedYet = true;
- if (exists(txn, key)) {
- if (wouldCreateDup(txn, key, genericRecordLoc)) {
+ if (exists(opCtx, key)) {
+ if (wouldCreateDup(opCtx, key, genericRecordLoc)) {
return Status(ErrorCodes::DuplicateKey, dupKeyError(key), 11000);
} else {
return Status(ErrorCodes::DuplicateKeyValue,
@@ -1123,54 +1123,54 @@ Status BtreeLogic<BtreeLayout>::_find(OperationContext* txn,
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::delBucket(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::delBucket(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc) {
- invariant(bucketLoc != getRootLoc(txn));
+ invariant(bucketLoc != getRootLoc(opCtx));
_cursorRegistry->invalidateCursorsForBucket(bucketLoc);
- BucketType* p = getBucket(txn, bucket->parent);
- int parentIdx = indexInParent(txn, bucket, bucketLoc);
- *txn->recoveryUnit()->writing(&childLocForPos(p, parentIdx)) = DiskLoc();
- deallocBucket(txn, bucket, bucketLoc);
+ BucketType* p = getBucket(opCtx, bucket->parent);
+ int parentIdx = indexInParent(opCtx, bucket, bucketLoc);
+ *opCtx->recoveryUnit()->writing(&childLocForPos(p, parentIdx)) = DiskLoc();
+ deallocBucket(opCtx, bucket, bucketLoc);
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::deallocBucket(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::deallocBucket(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc) {
bucket->n = BtreeLayout::INVALID_N_SENTINEL;
bucket->parent.Null();
- _recordStore->deleteRecord(txn, bucketLoc.toRecordId());
+ _recordStore->deleteRecord(opCtx, bucketLoc.toRecordId());
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::restorePosition(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::restorePosition(OperationContext* opCtx,
const BSONObj& savedKey,
const DiskLoc& savedLoc,
int direction,
DiskLoc* bucketLocInOut,
int* keyOffsetInOut) const {
// The caller has to ensure validity of the saved cursor using the SavedCursorRegistry
- BucketType* bucket = getBucket(txn, *bucketLocInOut);
+ BucketType* bucket = getBucket(opCtx, *bucketLocInOut);
invariant(bucket);
invariant(BtreeLayout::INVALID_N_SENTINEL != bucket->n);
if (_keyIsAt(savedKey, savedLoc, bucket, *keyOffsetInOut)) {
- skipUnusedKeys(txn, bucketLocInOut, keyOffsetInOut, direction);
+ skipUnusedKeys(opCtx, bucketLocInOut, keyOffsetInOut, direction);
return;
}
if (*keyOffsetInOut > 0) {
(*keyOffsetInOut)--;
if (_keyIsAt(savedKey, savedLoc, bucket, *keyOffsetInOut)) {
- skipUnusedKeys(txn, bucketLocInOut, keyOffsetInOut, direction);
+ skipUnusedKeys(opCtx, bucketLocInOut, keyOffsetInOut, direction);
return;
}
}
- locate(txn, savedKey, savedLoc, direction, keyOffsetInOut, bucketLocInOut);
+ locate(opCtx, savedKey, savedLoc, direction, keyOffsetInOut, bucketLocInOut);
}
template <class BtreeLayout>
@@ -1193,7 +1193,7 @@ bool BtreeLogic<BtreeLayout>::_keyIsAt(const BSONObj& savedKey,
* May delete the bucket 'bucket' rendering 'bucketLoc' invalid.
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::delKeyAtPos(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::delKeyAtPos(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int p) {
@@ -1205,24 +1205,24 @@ void BtreeLogic<BtreeLayout>::delKeyAtPos(OperationContext* txn,
if (isHead(bucket)) {
// we don't delete the top bucket ever
} else {
- if (!mayBalanceWithNeighbors(txn, bucket, bucketLoc)) {
+ if (!mayBalanceWithNeighbors(opCtx, bucket, bucketLoc)) {
// An empty bucket is only allowed as a transient state. If
// there are no neighbors to balance with, we delete ourself.
// This condition is only expected in legacy btrees.
- delBucket(txn, bucket, bucketLoc);
+ delBucket(opCtx, bucket, bucketLoc);
}
}
return;
}
- deleteInternalKey(txn, bucket, bucketLoc, p);
+ deleteInternalKey(opCtx, bucket, bucketLoc, p);
return;
}
if (left.isNull()) {
_delKeyAtPos(bucket, p);
- mayBalanceWithNeighbors(txn, bucket, bucketLoc);
+ mayBalanceWithNeighbors(opCtx, bucket, bucketLoc);
} else {
- deleteInternalKey(txn, bucket, bucketLoc, p);
+ deleteInternalKey(opCtx, bucket, bucketLoc, p);
}
}
@@ -1250,7 +1250,7 @@ void BtreeLogic<BtreeLayout>::delKeyAtPos(OperationContext* txn,
* legacy btree.
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int keypos) {
@@ -1259,11 +1259,11 @@ void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* txn,
invariant(!lchild.isNull() || !rchild.isNull());
int advanceDirection = lchild.isNull() ? 1 : -1;
int advanceKeyOfs = keypos;
- DiskLoc advanceLoc = advance(txn, bucketLoc, &advanceKeyOfs, advanceDirection);
+ DiskLoc advanceLoc = advance(opCtx, bucketLoc, &advanceKeyOfs, advanceDirection);
// advanceLoc must be a descentant of thisLoc, because thisLoc has a
// child in the proper direction and all descendants of thisLoc must be
// nonempty because they are not the root.
- BucketType* advanceBucket = getBucket(txn, advanceLoc);
+ BucketType* advanceBucket = getBucket(opCtx, advanceLoc);
if (!childLocForPos(advanceBucket, advanceKeyOfs).isNull() ||
!childLocForPos(advanceBucket, advanceKeyOfs + 1).isNull()) {
@@ -1275,7 +1275,7 @@ void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* txn,
// Because advanceLoc is a descendant of thisLoc, updating thisLoc will
// not affect packing or keys of advanceLoc and kn will be stable
// during the following setInternalKey()
- setInternalKey(txn,
+ setInternalKey(opCtx,
bucket,
bucketLoc,
keypos,
@@ -1283,31 +1283,31 @@ void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* txn,
kn.data,
childLocForPos(bucket, keypos),
childLocForPos(bucket, keypos + 1));
- delKeyAtPos(txn, btreemod(txn, advanceBucket), advanceLoc, advanceKeyOfs);
+ delKeyAtPos(opCtx, btreemod(opCtx, advanceBucket), advanceLoc, advanceKeyOfs);
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::replaceWithNextChild(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::replaceWithNextChild(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc) {
invariant(bucket->n == 0 && !bucket->nextChild.isNull());
if (bucket->parent.isNull()) {
- invariant(getRootLoc(txn) == bucketLoc);
- _headManager->setHead(txn, bucket->nextChild.toRecordId());
+ invariant(getRootLoc(opCtx) == bucketLoc);
+ _headManager->setHead(opCtx, bucket->nextChild.toRecordId());
} else {
- BucketType* parentBucket = getBucket(txn, bucket->parent);
- int bucketIndexInParent = indexInParent(txn, bucket, bucketLoc);
- *txn->recoveryUnit()->writing(&childLocForPos(parentBucket, bucketIndexInParent)) =
+ BucketType* parentBucket = getBucket(opCtx, bucket->parent);
+ int bucketIndexInParent = indexInParent(opCtx, bucket, bucketLoc);
+ *opCtx->recoveryUnit()->writing(&childLocForPos(parentBucket, bucketIndexInParent)) =
bucket->nextChild;
}
- *txn->recoveryUnit()->writing(&getBucket(txn, bucket->nextChild)->parent) = bucket->parent;
+ *opCtx->recoveryUnit()->writing(&getBucket(opCtx, bucket->nextChild)->parent) = bucket->parent;
_cursorRegistry->invalidateCursorsForBucket(bucketLoc);
- deallocBucket(txn, bucket, bucketLoc);
+ deallocBucket(opCtx, bucket, bucketLoc);
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::canMergeChildren(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::canMergeChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
const int leftIndex) {
@@ -1322,8 +1322,8 @@ bool BtreeLogic<BtreeLayout>::canMergeChildren(OperationContext* txn,
int pos = 0;
- BucketType* leftBucket = getBucket(txn, leftNodeLoc);
- BucketType* rightBucket = getBucket(txn, rightNodeLoc);
+ BucketType* leftBucket = getBucket(opCtx, leftNodeLoc);
+ BucketType* rightBucket = getBucket(opCtx, rightNodeLoc);
int sum = BucketType::HeaderSize + _packedDataSize(leftBucket, pos) +
_packedDataSize(rightBucket, pos) + getFullKey(bucket, leftIndex).data.dataSize() +
@@ -1337,14 +1337,14 @@ bool BtreeLogic<BtreeLayout>::canMergeChildren(OperationContext* txn,
* splitPos().
*/
template <class BtreeLayout>
-int BtreeLogic<BtreeLayout>::_rebalancedSeparatorPos(OperationContext* txn,
+int BtreeLogic<BtreeLayout>::_rebalancedSeparatorPos(OperationContext* opCtx,
BucketType* bucket,
int leftIndex) {
int split = -1;
int rightSize = 0;
- const BucketType* l = childForPos(txn, bucket, leftIndex);
- const BucketType* r = childForPos(txn, bucket, leftIndex + 1);
+ const BucketType* l = childForPos(opCtx, bucket, leftIndex);
+ const BucketType* r = childForPos(opCtx, bucket, leftIndex + 1);
int KNS = sizeof(KeyHeaderType);
int rightSizeLimit = (l->topSize + l->n * KNS + getFullKey(bucket, leftIndex).data.dataSize() +
@@ -1391,15 +1391,15 @@ int BtreeLogic<BtreeLayout>::_rebalancedSeparatorPos(OperationContext* txn,
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::doMergeChildren(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::doMergeChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex) {
DiskLoc leftNodeLoc = childLocForPos(bucket, leftIndex);
DiskLoc rightNodeLoc = childLocForPos(bucket, leftIndex + 1);
- BucketType* l = btreemod(txn, getBucket(txn, leftNodeLoc));
- BucketType* r = btreemod(txn, getBucket(txn, rightNodeLoc));
+ BucketType* l = btreemod(opCtx, getBucket(opCtx, leftNodeLoc));
+ BucketType* r = btreemod(opCtx, getBucket(opCtx, rightNodeLoc));
int pos = 0;
_packReadyForMod(l, pos);
@@ -1417,8 +1417,8 @@ void BtreeLogic<BtreeLayout>::doMergeChildren(OperationContext* txn,
}
l->nextChild = r->nextChild;
- fixParentPtrs(txn, l, leftNodeLoc, oldLNum);
- delBucket(txn, r, rightNodeLoc);
+ fixParentPtrs(opCtx, l, leftNodeLoc, oldLNum);
+ delBucket(opCtx, r, rightNodeLoc);
childLocForPos(bucket, leftIndex + 1) = leftNodeLoc;
childLocForPos(bucket, leftIndex) = DiskLoc();
@@ -1429,18 +1429,18 @@ void BtreeLogic<BtreeLayout>::doMergeChildren(OperationContext* txn,
//
// TODO To ensure all leaves are of equal height, we should ensure this is only called
// on the root.
- replaceWithNextChild(txn, bucket, bucketLoc);
+ replaceWithNextChild(opCtx, bucket, bucketLoc);
} else {
- mayBalanceWithNeighbors(txn, bucket, bucketLoc);
+ mayBalanceWithNeighbors(opCtx, bucket, bucketLoc);
}
}
template <class BtreeLayout>
-int BtreeLogic<BtreeLayout>::indexInParent(OperationContext* txn,
+int BtreeLogic<BtreeLayout>::indexInParent(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc) const {
invariant(!bucket->parent.isNull());
- const BucketType* p = getBucket(txn, bucket->parent);
+ const BucketType* p = getBucket(opCtx, bucket->parent);
if (p->nextChild == bucketLoc) {
return p->n;
}
@@ -1461,22 +1461,22 @@ int BtreeLogic<BtreeLayout>::indexInParent(OperationContext* txn,
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::tryBalanceChildren(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::tryBalanceChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex) {
// If we can merge, then we must merge rather than balance to preserve bucket utilization
// constraints.
- if (canMergeChildren(txn, bucket, bucketLoc, leftIndex)) {
+ if (canMergeChildren(opCtx, bucket, bucketLoc, leftIndex)) {
return false;
}
- doBalanceChildren(txn, btreemod(txn, bucket), bucketLoc, leftIndex);
+ doBalanceChildren(opCtx, btreemod(opCtx, bucket), bucketLoc, leftIndex);
return true;
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::doBalanceLeftToRight(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::doBalanceLeftToRight(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex,
@@ -1500,14 +1500,14 @@ void BtreeLogic<BtreeLayout>::doBalanceLeftToRight(OperationContext* txn,
FullKey leftIndexKN = getFullKey(bucket, leftIndex);
setKey(r, rAdd - 1, leftIndexKN.recordLoc, leftIndexKN.data, l->nextChild);
- fixParentPtrs(txn, r, rchild, 0, rAdd - 1);
+ fixParentPtrs(opCtx, r, rchild, 0, rAdd - 1);
FullKey kn = getFullKey(l, split);
l->nextChild = kn.prevChildBucket;
// Because lchild is a descendant of thisLoc, updating thisLoc will not affect packing or
// keys of lchild and kn will be stable during the following setInternalKey()
- setInternalKey(txn, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
+ setInternalKey(opCtx, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
// lchild and rchild cannot be merged, so there must be >0 (actually more) keys to the left
// of split.
@@ -1516,7 +1516,7 @@ void BtreeLogic<BtreeLayout>::doBalanceLeftToRight(OperationContext* txn,
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::doBalanceRightToLeft(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::doBalanceRightToLeft(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex,
@@ -1546,11 +1546,11 @@ void BtreeLogic<BtreeLayout>::doBalanceRightToLeft(OperationContext* txn,
FullKey kn = getFullKey(r, split - lN - 1);
l->nextChild = kn.prevChildBucket;
// Child lN was lchild's old nextChild, and don't need to fix that one.
- fixParentPtrs(txn, l, lchild, lN + 1, l->n);
+ fixParentPtrs(opCtx, l, lchild, lN + 1, l->n);
// Because rchild is a descendant of thisLoc, updating thisLoc will
// not affect packing or keys of rchild and kn will be stable
// during the following setInternalKey()
- setInternalKey(txn, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
+ setInternalKey(opCtx, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
}
// lchild and rchild cannot be merged, so there must be >0 (actually more)
@@ -1560,7 +1560,7 @@ void BtreeLogic<BtreeLayout>::doBalanceRightToLeft(OperationContext* txn,
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::doBalanceChildren(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::doBalanceChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex) {
@@ -1568,26 +1568,26 @@ void BtreeLogic<BtreeLayout>::doBalanceChildren(OperationContext* txn,
DiskLoc rchild = childLocForPos(bucket, leftIndex + 1);
int zeropos = 0;
- BucketType* l = btreemod(txn, getBucket(txn, lchild));
+ BucketType* l = btreemod(opCtx, getBucket(opCtx, lchild));
_packReadyForMod(l, zeropos);
- BucketType* r = btreemod(txn, getBucket(txn, rchild));
+ BucketType* r = btreemod(opCtx, getBucket(opCtx, rchild));
_packReadyForMod(r, zeropos);
- int split = _rebalancedSeparatorPos(txn, bucket, leftIndex);
+ int split = _rebalancedSeparatorPos(opCtx, bucket, leftIndex);
// By definition, if we are below the low water mark and cannot merge
// then we must actively balance.
invariant(split != l->n);
if (split < l->n) {
- doBalanceLeftToRight(txn, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
+ doBalanceLeftToRight(opCtx, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
} else {
- doBalanceRightToLeft(txn, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
+ doBalanceRightToLeft(opCtx, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
}
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc) {
if (bucket->parent.isNull()) {
@@ -1598,8 +1598,8 @@ bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* txn,
return false;
}
- BucketType* p = getBucket(txn, bucket->parent);
- int parentIdx = indexInParent(txn, bucket, bucketLoc);
+ BucketType* p = getBucket(opCtx, bucket->parent);
+ int parentIdx = indexInParent(opCtx, bucket, bucketLoc);
// TODO will missing neighbor case be possible long term? Should we try to merge/balance
// somehow in that case if so?
@@ -1609,20 +1609,20 @@ bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* txn,
// Balance if possible on one side - we merge only if absolutely necessary to preserve btree
// bucket utilization constraints since that's a more heavy duty operation (especially if we
// must re-split later).
- if (mayBalanceRight && tryBalanceChildren(txn, p, bucket->parent, parentIdx)) {
+ if (mayBalanceRight && tryBalanceChildren(opCtx, p, bucket->parent, parentIdx)) {
return true;
}
- if (mayBalanceLeft && tryBalanceChildren(txn, p, bucket->parent, parentIdx - 1)) {
+ if (mayBalanceLeft && tryBalanceChildren(opCtx, p, bucket->parent, parentIdx - 1)) {
return true;
}
- BucketType* pm = btreemod(txn, getBucket(txn, bucket->parent));
+ BucketType* pm = btreemod(opCtx, getBucket(opCtx, bucket->parent));
if (mayBalanceRight) {
- doMergeChildren(txn, pm, bucket->parent, parentIdx);
+ doMergeChildren(opCtx, pm, bucket->parent, parentIdx);
return true;
} else if (mayBalanceLeft) {
- doMergeChildren(txn, pm, bucket->parent, parentIdx - 1);
+ doMergeChildren(opCtx, pm, bucket->parent, parentIdx - 1);
return true;
}
@@ -1630,25 +1630,25 @@ bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* txn,
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::unindex(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::unindex(OperationContext* opCtx,
const BSONObj& key,
const DiskLoc& recordLoc) {
int pos;
bool found = false;
KeyDataOwnedType ownedKey(key);
- DiskLoc loc = _locate(txn, getRootLoc(txn), ownedKey, &pos, &found, recordLoc, 1);
+ DiskLoc loc = _locate(opCtx, getRootLoc(opCtx), ownedKey, &pos, &found, recordLoc, 1);
if (found) {
- BucketType* bucket = btreemod(txn, getBucket(txn, loc));
- delKeyAtPos(txn, bucket, loc, pos);
- assertValid(_indexName, getRoot(txn), _ordering);
+ BucketType* bucket = btreemod(opCtx, getBucket(opCtx, loc));
+ delKeyAtPos(opCtx, bucket, loc, pos);
+ assertValid(_indexName, getRoot(opCtx), _ordering);
}
return found;
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::isEmpty(OperationContext* txn) const {
- return getRoot(txn)->n == 0;
+bool BtreeLogic<BtreeLayout>::isEmpty(OperationContext* opCtx) const {
+ return getRoot(opCtx)->n == 0;
}
/**
@@ -1656,12 +1656,12 @@ bool BtreeLogic<BtreeLayout>::isEmpty(OperationContext* txn) const {
* Maybe get rid of parent ptrs?
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::fixParentPtrs(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::fixParentPtrs(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int firstIndex,
int lastIndex) {
- invariant(getBucket(txn, bucketLoc) == bucket);
+ invariant(getBucket(opCtx, bucketLoc) == bucket);
if (lastIndex == -1) {
lastIndex = bucket->n;
@@ -1670,13 +1670,13 @@ void BtreeLogic<BtreeLayout>::fixParentPtrs(OperationContext* txn,
for (int i = firstIndex; i <= lastIndex; i++) {
const DiskLoc childLoc = childLocForPos(bucket, i);
if (!childLoc.isNull()) {
- *txn->recoveryUnit()->writing(&getBucket(txn, childLoc)->parent) = bucketLoc;
+ *opCtx->recoveryUnit()->writing(&getBucket(opCtx, childLoc)->parent) = bucketLoc;
}
}
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::setInternalKey(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::setInternalKey(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int keypos,
@@ -1696,7 +1696,7 @@ void BtreeLogic<BtreeLayout>::setInternalKey(OperationContext* txn,
// Just set temporarily - required to pass validation in insertHere()
childLocForPos(bucket, keypos) = lchild;
- insertHere(txn, bucketLoc, keypos, key, recordLoc, lchild, rchild);
+ insertHere(opCtx, bucketLoc, keypos, key, recordLoc, lchild, rchild);
}
/**
@@ -1710,19 +1710,19 @@ void BtreeLogic<BtreeLayout>::setInternalKey(OperationContext* txn,
* intent code in basicInsert().
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::insertHere(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::insertHere(OperationContext* opCtx,
const DiskLoc bucketLoc,
int pos,
const KeyDataType& key,
const DiskLoc recordLoc,
const DiskLoc leftChildLoc,
const DiskLoc rightChildLoc) {
- BucketType* bucket = getBucket(txn, bucketLoc);
+ BucketType* bucket = getBucket(opCtx, bucketLoc);
- if (!basicInsert(txn, bucket, bucketLoc, pos, key, recordLoc)) {
+ if (!basicInsert(opCtx, bucket, bucketLoc, pos, key, recordLoc)) {
// If basicInsert() fails, the bucket will be packed as required by split().
- split(txn,
- btreemod(txn, bucket),
+ split(opCtx,
+ btreemod(opCtx, bucket),
bucketLoc,
pos,
recordLoc,
@@ -1741,9 +1741,9 @@ void BtreeLogic<BtreeLayout>::insertHere(OperationContext* txn,
}
kn->prevChildBucket = bucket->nextChild;
invariant(kn->prevChildBucket == leftChildLoc);
- *txn->recoveryUnit()->writing(&bucket->nextChild) = rightChildLoc;
+ *opCtx->recoveryUnit()->writing(&bucket->nextChild) = rightChildLoc;
if (!rightChildLoc.isNull()) {
- *txn->recoveryUnit()->writing(&getBucket(txn, rightChildLoc)->parent) = bucketLoc;
+ *opCtx->recoveryUnit()->writing(&getBucket(opCtx, rightChildLoc)->parent) = bucketLoc;
}
} else {
kn->prevChildBucket = leftChildLoc;
@@ -1755,13 +1755,13 @@ void BtreeLogic<BtreeLayout>::insertHere(OperationContext* txn,
// Intent declared in basicInsert()
*const_cast<LocType*>(pc) = rightChildLoc;
if (!rightChildLoc.isNull()) {
- *txn->recoveryUnit()->writing(&getBucket(txn, rightChildLoc)->parent) = bucketLoc;
+ *opCtx->recoveryUnit()->writing(&getBucket(opCtx, rightChildLoc)->parent) = bucketLoc;
}
}
}
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::split(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int keypos,
@@ -1770,8 +1770,8 @@ void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
const DiskLoc lchild,
const DiskLoc rchild) {
int split = splitPos(bucket, keypos);
- DiskLoc rLoc = _addBucket(txn);
- BucketType* r = btreemod(txn, getBucket(txn, rLoc));
+ DiskLoc rLoc = _addBucket(opCtx);
+ BucketType* r = btreemod(opCtx, getBucket(opCtx, rLoc));
for (int i = split + 1; i < bucket->n; i++) {
FullKey kn = getFullKey(bucket, i);
@@ -1781,7 +1781,7 @@ void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
assertValid(_indexName, r, _ordering);
r = NULL;
- fixParentPtrs(txn, getBucket(txn, rLoc), rLoc);
+ fixParentPtrs(opCtx, getBucket(opCtx, rLoc), rLoc);
FullKey splitkey = getFullKey(bucket, split);
// splitkey key gets promoted, its children will be thisLoc (l) and rLoc (r)
@@ -1792,20 +1792,20 @@ void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
if (bucket->parent.isNull()) {
// promote splitkey to a parent this->node make a new parent if we were the root
- DiskLoc L = _addBucket(txn);
- BucketType* p = btreemod(txn, getBucket(txn, L));
+ DiskLoc L = _addBucket(opCtx);
+ BucketType* p = btreemod(opCtx, getBucket(opCtx, L));
invariant(pushBack(p, splitkey.recordLoc, splitkey.data, bucketLoc));
p->nextChild = rLoc;
assertValid(_indexName, p, _ordering);
bucket->parent = L;
- _headManager->setHead(txn, L.toRecordId());
- *txn->recoveryUnit()->writing(&getBucket(txn, rLoc)->parent) = bucket->parent;
+ _headManager->setHead(opCtx, L.toRecordId());
+ *opCtx->recoveryUnit()->writing(&getBucket(opCtx, rLoc)->parent) = bucket->parent;
} else {
// set this before calling _insert - if it splits it will do fixParent() logic and
// change the value.
- *txn->recoveryUnit()->writing(&getBucket(txn, rLoc)->parent) = bucket->parent;
- _insert(txn,
- getBucket(txn, bucket->parent),
+ *opCtx->recoveryUnit()->writing(&getBucket(opCtx, rLoc)->parent) = bucket->parent;
+ _insert(opCtx,
+ getBucket(opCtx, bucket->parent),
bucket->parent,
splitkey.data,
splitkey.recordLoc,
@@ -1820,11 +1820,11 @@ void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
// add our this->new key, there is room this->now
if (keypos <= split) {
- insertHere(txn, bucketLoc, newpos, key, recordLoc, lchild, rchild);
+ insertHere(opCtx, bucketLoc, newpos, key, recordLoc, lchild, rchild);
} else {
int kp = keypos - split - 1;
invariant(kp >= 0);
- insertHere(txn, rLoc, kp, key, recordLoc, lchild, rchild);
+ insertHere(opCtx, rLoc, kp, key, recordLoc, lchild, rchild);
}
}
@@ -1842,24 +1842,24 @@ private:
};
template <class BtreeLayout>
-Status BtreeLogic<BtreeLayout>::initAsEmpty(OperationContext* txn) {
- if (!_headManager->getHead(txn).isNull()) {
+Status BtreeLogic<BtreeLayout>::initAsEmpty(OperationContext* opCtx) {
+ if (!_headManager->getHead(opCtx).isNull()) {
return Status(ErrorCodes::InternalError, "index already initialized");
}
- _headManager->setHead(txn, _addBucket(txn).toRecordId());
+ _headManager->setHead(opCtx, _addBucket(opCtx).toRecordId());
return Status::OK();
}
template <class BtreeLayout>
-DiskLoc BtreeLogic<BtreeLayout>::_addBucket(OperationContext* txn) {
+DiskLoc BtreeLogic<BtreeLayout>::_addBucket(OperationContext* opCtx) {
DummyDocWriter docWriter(BtreeLayout::BucketSize);
- StatusWith<RecordId> loc = _recordStore->insertRecordWithDocWriter(txn, &docWriter);
+ StatusWith<RecordId> loc = _recordStore->insertRecordWithDocWriter(opCtx, &docWriter);
// XXX: remove this(?) or turn into massert or sanely bubble it back up.
uassertStatusOK(loc.getStatus());
// this is a new bucket, not referenced by anyone, probably don't need this lock
- BucketType* b = btreemod(txn, getBucket(txn, loc.getValue()));
+ BucketType* b = btreemod(opCtx, getBucket(opCtx, loc.getValue()));
init(b);
return DiskLoc::fromRecordId(loc.getValue());
}
@@ -1888,20 +1888,20 @@ void BtreeLogic<BtreeLayout>::dumpBucket(const BucketType* bucket, int indentLen
}
template <class BtreeLayout>
-DiskLoc BtreeLogic<BtreeLayout>::getDiskLoc(OperationContext* txn,
+DiskLoc BtreeLogic<BtreeLayout>::getDiskLoc(OperationContext* opCtx,
const DiskLoc& bucketLoc,
const int keyOffset) const {
invariant(!bucketLoc.isNull());
- BucketType* bucket = getBucket(txn, bucketLoc);
+ BucketType* bucket = getBucket(opCtx, bucketLoc);
return getKeyHeader(bucket, keyOffset).recordLoc;
}
template <class BtreeLayout>
-BSONObj BtreeLogic<BtreeLayout>::getKey(OperationContext* txn,
+BSONObj BtreeLogic<BtreeLayout>::getKey(OperationContext* opCtx,
const DiskLoc& bucketLoc,
const int keyOffset) const {
invariant(!bucketLoc.isNull());
- BucketType* bucket = getBucket(txn, bucketLoc);
+ BucketType* bucket = getBucket(opCtx, bucketLoc);
int n = bucket->n;
invariant(n != BtreeLayout::INVALID_N_SENTINEL);
invariant(n >= 0);
@@ -1920,7 +1920,7 @@ BSONObj BtreeLogic<BtreeLayout>::getKey(OperationContext* txn,
}
template <class BtreeLayout>
-IndexKeyEntry BtreeLogic<BtreeLayout>::getRandomEntry(OperationContext* txn) const {
+IndexKeyEntry BtreeLogic<BtreeLayout>::getRandomEntry(OperationContext* opCtx) const {
// To ensure a uniform distribution, all keys must have an equal probability of being selected.
// Specifically, a key from the root should have the same probability of being selected as a key
// from a leaf.
@@ -1934,19 +1934,19 @@ IndexKeyEntry BtreeLogic<BtreeLayout>::getRandomEntry(OperationContext* txn) con
// As a simplification, we treat all buckets in a given level as having the same number of
// children. While this is inaccurate if the tree isn't perfectly balanced or if key-size
// greatly varies, it is assumed to be good enough for this purpose.
- invariant(!isEmpty(txn));
- BucketType* root = getRoot(txn);
+ invariant(!isEmpty(opCtx));
+ BucketType* root = getRoot(opCtx);
vector<int64_t> nKeysInLevel;
vector<FullKey> selectedKeys;
- auto& prng = txn->getClient()->getPrng();
+ auto& prng = opCtx->getClient()->getPrng();
int nRetries = 0;
const int kMaxRetries = 5;
do {
// See documentation below for description of parameters.
- recordRandomWalk(txn, &prng, root, 1, &nKeysInLevel, &selectedKeys);
+ recordRandomWalk(opCtx, &prng, root, 1, &nKeysInLevel, &selectedKeys);
} while (selectedKeys.empty() && nRetries++ < kMaxRetries);
massert(28826,
str::stream() << "index " << _indexName << " may be corrupt, please repair",
@@ -1989,7 +1989,7 @@ IndexKeyEntry BtreeLogic<BtreeLayout>::getRandomEntry(OperationContext* txn) con
* from the bucket we went through on the ith level of the B-tree.
*/
template <class BtreeLayout>
-void BtreeLogic<BtreeLayout>::recordRandomWalk(OperationContext* txn,
+void BtreeLogic<BtreeLayout>::recordRandomWalk(OperationContext* opCtx,
PseudoRandom* prng,
BucketType* curBucket,
int64_t nBucketsInCurrentLevel,
@@ -2008,34 +2008,34 @@ void BtreeLogic<BtreeLayout>::recordRandomWalk(OperationContext* txn,
// Select a random child and descend (if there are any).
int nChildren = nKeys + 1;
int nextChild = prng->nextInt32(nChildren);
- if (auto child = childForPos(txn, curBucket, nextChild)) {
+ if (auto child = childForPos(opCtx, curBucket, nextChild)) {
recordRandomWalk(
- txn, prng, child, nBucketsInCurrentLevel * nChildren, nKeysInLevel, selectedKeys);
+ opCtx, prng, child, nBucketsInCurrentLevel * nChildren, nKeysInLevel, selectedKeys);
}
}
template <class BtreeLayout>
-Status BtreeLogic<BtreeLayout>::touch(OperationContext* txn) const {
- return _recordStore->touch(txn, NULL);
+Status BtreeLogic<BtreeLayout>::touch(OperationContext* opCtx) const {
+ return _recordStore->touch(opCtx, NULL);
}
template <class BtreeLayout>
-long long BtreeLogic<BtreeLayout>::fullValidate(OperationContext* txn,
+long long BtreeLogic<BtreeLayout>::fullValidate(OperationContext* opCtx,
long long* unusedCount,
bool strict,
bool dumpBuckets,
unsigned depth) const {
- return _fullValidate(txn, getRootLoc(txn), unusedCount, strict, dumpBuckets, depth);
+ return _fullValidate(opCtx, getRootLoc(opCtx), unusedCount, strict, dumpBuckets, depth);
}
template <class BtreeLayout>
-long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* txn,
+long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* opCtx,
const DiskLoc bucketLoc,
long long* unusedCount,
bool strict,
bool dumpBuckets,
unsigned depth) const {
- BucketType* bucket = getBucket(txn, bucketLoc);
+ BucketType* bucket = getBucket(opCtx, bucketLoc);
assertValid(_indexName, bucket, _ordering, true);
if (dumpBuckets) {
@@ -2056,7 +2056,7 @@ long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* txn,
if (!kn.prevChildBucket.isNull()) {
DiskLoc left = kn.prevChildBucket;
- BucketType* b = getBucket(txn, left);
+ BucketType* b = getBucket(opCtx, left);
if (strict) {
invariant(b->parent == bucketLoc);
@@ -2064,12 +2064,12 @@ long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* txn,
wassert(b->parent == bucketLoc);
}
- keyCount += _fullValidate(txn, left, unusedCount, strict, dumpBuckets, depth + 1);
+ keyCount += _fullValidate(opCtx, left, unusedCount, strict, dumpBuckets, depth + 1);
}
}
if (!bucket->nextChild.isNull()) {
- BucketType* b = getBucket(txn, bucket->nextChild);
+ BucketType* b = getBucket(opCtx, bucket->nextChild);
if (strict) {
invariant(b->parent == bucketLoc);
} else {
@@ -2077,7 +2077,7 @@ long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* txn,
}
keyCount +=
- _fullValidate(txn, bucket->nextChild, unusedCount, strict, dumpBuckets, depth + 1);
+ _fullValidate(opCtx, bucket->nextChild, unusedCount, strict, dumpBuckets, depth + 1);
}
return keyCount;
@@ -2149,7 +2149,7 @@ void BtreeLogic<BtreeLayout>::assertValid(const std::string& ns,
}
template <class BtreeLayout>
-Status BtreeLogic<BtreeLayout>::insert(OperationContext* txn,
+Status BtreeLogic<BtreeLayout>::insert(OperationContext* opCtx,
const BSONObj& rawKey,
const DiskLoc& value,
bool dupsAllowed) {
@@ -2161,15 +2161,15 @@ Status BtreeLogic<BtreeLayout>::insert(OperationContext* txn,
return Status(ErrorCodes::KeyTooLong, msg);
}
- Status status =
- _insert(txn, getRoot(txn), getRootLoc(txn), key, value, dupsAllowed, DiskLoc(), DiskLoc());
+ Status status = _insert(
+ opCtx, getRoot(opCtx), getRootLoc(opCtx), key, value, dupsAllowed, DiskLoc(), DiskLoc());
- assertValid(_indexName, getRoot(txn), _ordering);
+ assertValid(_indexName, getRoot(opCtx), _ordering);
return status;
}
template <class BtreeLayout>
-Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
+Status BtreeLogic<BtreeLayout>::_insert(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
const KeyDataType& key,
@@ -2181,7 +2181,7 @@ Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
int pos;
bool found;
- Status findStatus = _find(txn, bucket, key, recordLoc, !dupsAllowed, &pos, &found);
+ Status findStatus = _find(opCtx, bucket, key, recordLoc, !dupsAllowed, &pos, &found);
if (!findStatus.isOK()) {
return findStatus;
}
@@ -2192,7 +2192,7 @@ Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
LOG(4) << "btree _insert: reusing unused key" << endl;
massert(17433, "_insert: reuse key but lchild is not null", leftChild.isNull());
massert(17434, "_insert: reuse key but rchild is not null", rightChild.isNull());
- txn->recoveryUnit()->writing(&header)->setUsed();
+ opCtx->recoveryUnit()->writing(&header)->setUsed();
return Status::OK();
}
// The logic in _find() prohibits finding and returning a position if the 'used' bit
@@ -2210,11 +2210,11 @@ Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
// promoting a split key. These are the only two cases where _insert() is called
// currently.
if (childLoc.isNull() || !rightChild.isNull()) {
- insertHere(txn, bucketLoc, pos, key, recordLoc, leftChild, rightChild);
+ insertHere(opCtx, bucketLoc, pos, key, recordLoc, leftChild, rightChild);
return Status::OK();
} else {
- return _insert(txn,
- getBucket(txn, childLoc),
+ return _insert(opCtx,
+ getBucket(opCtx, childLoc),
childLoc,
key,
recordLoc,
@@ -2225,11 +2225,11 @@ Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
}
template <class BtreeLayout>
-DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
+DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* opCtx,
const DiskLoc& bucketLoc,
int* posInOut,
int direction) const {
- BucketType* bucket = getBucket(txn, bucketLoc);
+ BucketType* bucket = getBucket(opCtx, bucketLoc);
if (*posInOut < 0 || *posInOut >= bucket->n) {
log() << "ASSERT failure advancing btree bucket" << endl;
@@ -2246,7 +2246,7 @@ DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
// Look down if we need to.
DiskLoc nextDownLoc = childLocForPos(bucket, ko + adj);
- BucketType* nextDown = getBucket(txn, nextDownLoc);
+ BucketType* nextDown = getBucket(opCtx, nextDownLoc);
if (NULL != nextDown) {
for (;;) {
if (direction > 0) {
@@ -2255,7 +2255,7 @@ DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
*posInOut = nextDown->n - 1;
}
DiskLoc newNextDownLoc = childLocForPos(nextDown, *posInOut + adj);
- BucketType* newNextDownBucket = getBucket(txn, newNextDownLoc);
+ BucketType* newNextDownBucket = getBucket(opCtx, newNextDownLoc);
if (NULL == newNextDownBucket) {
break;
}
@@ -2273,12 +2273,12 @@ DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
// Hit the end of the bucket, move up and over.
DiskLoc childLoc = bucketLoc;
- DiskLoc ancestor = getBucket(txn, bucketLoc)->parent;
+ DiskLoc ancestor = getBucket(opCtx, bucketLoc)->parent;
for (;;) {
if (ancestor.isNull()) {
break;
}
- BucketType* an = getBucket(txn, ancestor);
+ BucketType* an = getBucket(opCtx, ancestor);
for (int i = 0; i < an->n; i++) {
if (childLocForPos(an, i + adj) == childLoc) {
*posInOut = i;
@@ -2295,14 +2295,14 @@ DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::keyIsUsed(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::keyIsUsed(OperationContext* opCtx,
const DiskLoc& loc,
const int& pos) const {
- return getKeyHeader(getBucket(txn, loc), pos).isUsed();
+ return getKeyHeader(getBucket(opCtx, loc), pos).isUsed();
}
template <class BtreeLayout>
-bool BtreeLogic<BtreeLayout>::locate(OperationContext* txn,
+bool BtreeLogic<BtreeLayout>::locate(OperationContext* opCtx,
const BSONObj& key,
const DiskLoc& recordLoc,
const int direction,
@@ -2315,13 +2315,13 @@ bool BtreeLogic<BtreeLayout>::locate(OperationContext* txn,
bool found = false;
KeyDataOwnedType owned(key);
- *bucketLocOut = _locate(txn, getRootLoc(txn), owned, posOut, &found, recordLoc, direction);
+ *bucketLocOut = _locate(opCtx, getRootLoc(opCtx), owned, posOut, &found, recordLoc, direction);
if (!found) {
return false;
}
- skipUnusedKeys(txn, bucketLocOut, posOut, direction);
+ skipUnusedKeys(opCtx, bucketLocOut, posOut, direction);
return found;
}
@@ -2331,7 +2331,7 @@ bool BtreeLogic<BtreeLayout>::locate(OperationContext* txn,
* Caller should have acquired lock on bucketLoc.
*/
template <class BtreeLayout>
-DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* txn,
+DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* opCtx,
const DiskLoc& bucketLoc,
const KeyDataType& key,
int* posOut,
@@ -2339,9 +2339,9 @@ DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* txn,
const DiskLoc& recordLoc,
const int direction) const {
int position;
- BucketType* bucket = getBucket(txn, bucketLoc);
+ BucketType* bucket = getBucket(opCtx, bucketLoc);
// XXX: owned to not owned conversion(?)
- _find(txn, bucket, key, recordLoc, false, &position, foundOut);
+ _find(opCtx, bucket, key, recordLoc, false, &position, foundOut);
// Look in our current bucket.
if (*foundOut) {
@@ -2353,7 +2353,7 @@ DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* txn,
DiskLoc childLoc = childLocForPos(bucket, position);
if (!childLoc.isNull()) {
- DiskLoc inChild = _locate(txn, childLoc, key, posOut, foundOut, recordLoc, direction);
+ DiskLoc inChild = _locate(opCtx, childLoc, key, posOut, foundOut, recordLoc, direction);
if (!inChild.isNull()) {
return inChild;
}
@@ -2389,12 +2389,12 @@ bool BtreeLogic<BtreeLayout>::isHead(BucketType* bucket) {
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::getBucket(
- OperationContext* txn, const RecordId id) const {
+ OperationContext* opCtx, const RecordId id) const {
if (id.isNull()) {
return NULL;
}
- RecordData recordData = _recordStore->dataFor(txn, id);
+ RecordData recordData = _recordStore->dataFor(opCtx, id);
// we need to be working on the raw bytes, not a transient copy
invariant(!recordData.isOwned());
@@ -2404,20 +2404,20 @@ typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::getBucket
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::getRoot(
- OperationContext* txn) const {
- return getBucket(txn, _headManager->getHead(txn));
+ OperationContext* opCtx) const {
+ return getBucket(opCtx, _headManager->getHead(opCtx));
}
template <class BtreeLayout>
-DiskLoc BtreeLogic<BtreeLayout>::getRootLoc(OperationContext* txn) const {
- return DiskLoc::fromRecordId(_headManager->getHead(txn));
+DiskLoc BtreeLogic<BtreeLayout>::getRootLoc(OperationContext* opCtx) const {
+ return DiskLoc::fromRecordId(_headManager->getHead(opCtx));
}
template <class BtreeLayout>
typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::childForPos(
- OperationContext* txn, BucketType* bucket, int pos) const {
+ OperationContext* opCtx, BucketType* bucket, int pos) const {
DiskLoc loc = childLocForPos(bucket, pos);
- return getBucket(txn, loc);
+ return getBucket(opCtx, loc);
}
template <class BtreeLayout>
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.h b/src/mongo/db/storage/mmap_v1/btree/btree_logic.h
index 438cbc54f88..1f6f0645875 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.h
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.h
@@ -108,7 +108,7 @@ public:
class SetRightLeafLocChange;
- Builder(BtreeLogic* logic, OperationContext* txn, bool dupsAllowed);
+ Builder(BtreeLogic* logic, OperationContext* opCtx, bool dupsAllowed);
/**
* Creates and returns a new empty bucket to the right of leftSib, maintaining the
@@ -128,18 +128,18 @@ public:
std::unique_ptr<KeyDataOwnedType> _keyLast;
// Not owned.
- OperationContext* _txn;
+ OperationContext* _opCtx;
};
/**
* Caller owns the returned pointer.
* 'this' must outlive the returned pointer.
*/
- Builder* newBuilder(OperationContext* txn, bool dupsAllowed);
+ Builder* newBuilder(OperationContext* opCtx, bool dupsAllowed);
- Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const DiskLoc& loc) const;
+ Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key, const DiskLoc& loc) const;
- Status insert(OperationContext* txn,
+ Status insert(OperationContext* opCtx,
const BSONObj& rawKey,
const DiskLoc& value,
bool dupsAllowed);
@@ -152,23 +152,23 @@ public:
* bucketLocOut would contain the bucket containing key which is before or after the
* searched one (dependent on the direction).
*/
- bool locate(OperationContext* txn,
+ bool locate(OperationContext* opCtx,
const BSONObj& key,
const DiskLoc& recordLoc,
const int direction,
int* posOut,
DiskLoc* bucketLocOut) const;
- void advance(OperationContext* txn,
+ void advance(OperationContext* opCtx,
DiskLoc* bucketLocInOut,
int* posInOut,
int direction) const;
- bool exists(OperationContext* txn, const KeyDataType& key) const;
+ bool exists(OperationContext* opCtx, const KeyDataType& key) const;
- bool unindex(OperationContext* txn, const BSONObj& key, const DiskLoc& recordLoc);
+ bool unindex(OperationContext* opCtx, const BSONObj& key, const DiskLoc& recordLoc);
- bool isEmpty(OperationContext* txn) const;
+ bool isEmpty(OperationContext* opCtx) const;
long long fullValidate(OperationContext*,
long long* unusedCount,
@@ -176,27 +176,29 @@ public:
bool dumpBuckets,
unsigned depth) const;
- DiskLoc getDiskLoc(OperationContext* txn, const DiskLoc& bucketLoc, const int keyOffset) const;
+ DiskLoc getDiskLoc(OperationContext* opCtx,
+ const DiskLoc& bucketLoc,
+ const int keyOffset) const;
- BSONObj getKey(OperationContext* txn, const DiskLoc& bucketLoc, const int keyOffset) const;
+ BSONObj getKey(OperationContext* opCtx, const DiskLoc& bucketLoc, const int keyOffset) const;
/**
* Returns a pseudo-random element from the tree. It is an error to call this method if the tree
* is empty.
*/
- IndexKeyEntry getRandomEntry(OperationContext* txn) const;
+ IndexKeyEntry getRandomEntry(OperationContext* opCtx) const;
- DiskLoc getHead(OperationContext* txn) const {
- return DiskLoc::fromRecordId(_headManager->getHead(txn));
+ DiskLoc getHead(OperationContext* opCtx) const {
+ return DiskLoc::fromRecordId(_headManager->getHead(opCtx));
}
- Status touch(OperationContext* txn) const;
+ Status touch(OperationContext* opCtx) const;
//
// Composite key navigation methods
//
- void customLocate(OperationContext* txn,
+ void customLocate(OperationContext* opCtx,
DiskLoc* locInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
@@ -208,7 +210,7 @@ public:
const IndexSeekPoint& seekPoint,
int direction) const;
- void restorePosition(OperationContext* txn,
+ void restorePosition(OperationContext* opCtx,
const BSONObj& savedKey,
const DiskLoc& savedLoc,
int direction,
@@ -222,7 +224,7 @@ public:
/**
* Returns OK if the index was uninitialized before, error status otherwise.
*/
- Status initAsEmpty(OperationContext* txn);
+ Status initAsEmpty(OperationContext* opCtx);
//
// Size constants
@@ -319,7 +321,7 @@ private:
static void setNotPacked(BucketType* bucket);
- static BucketType* btreemod(OperationContext* txn, BucketType* bucket);
+ static BucketType* btreemod(OperationContext* opCtx, BucketType* bucket);
static int splitPos(BucketType* bucket, int keypos);
@@ -345,7 +347,7 @@ private:
// information).
//
- bool basicInsert(OperationContext* txn,
+ bool basicInsert(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int& keypos,
@@ -354,16 +356,16 @@ private:
void dropFront(BucketType* bucket, int nDrop, int& refpos);
- void _pack(OperationContext* txn, BucketType* bucket, const DiskLoc thisLoc, int& refPos);
+ void _pack(OperationContext* opCtx, BucketType* bucket, const DiskLoc thisLoc, int& refPos);
- void customLocate(OperationContext* txn,
+ void customLocate(OperationContext* opCtx,
DiskLoc* locInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction,
std::pair<DiskLoc, int>& bestParent) const;
- Status _find(OperationContext* txn,
+ Status _find(OperationContext* opCtx,
BucketType* bucket,
const KeyDataType& key,
const DiskLoc& recordLoc,
@@ -371,7 +373,7 @@ private:
int* keyPositionOut,
bool* foundOut) const;
- bool customFind(OperationContext* txn,
+ bool customFind(OperationContext* opCtx,
int low,
int high,
const IndexSeekPoint& seekPoint,
@@ -380,24 +382,24 @@ private:
int* keyOfsInOut,
std::pair<DiskLoc, int>& bestParent) const;
- void advanceToImpl(OperationContext* txn,
+ void advanceToImpl(OperationContext* opCtx,
DiskLoc* thisLocInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction) const;
- bool wouldCreateDup(OperationContext* txn, const KeyDataType& key, const DiskLoc self) const;
+ bool wouldCreateDup(OperationContext* opCtx, const KeyDataType& key, const DiskLoc self) const;
- bool keyIsUsed(OperationContext* txn, const DiskLoc& loc, const int& pos) const;
+ bool keyIsUsed(OperationContext* opCtx, const DiskLoc& loc, const int& pos) const;
- void skipUnusedKeys(OperationContext* txn, DiskLoc* loc, int* pos, int direction) const;
+ void skipUnusedKeys(OperationContext* opCtx, DiskLoc* loc, int* pos, int direction) const;
- DiskLoc advance(OperationContext* txn,
+ DiskLoc advance(OperationContext* opCtx,
const DiskLoc& bucketLoc,
int* posInOut,
int direction) const;
- DiskLoc _locate(OperationContext* txn,
+ DiskLoc _locate(OperationContext* opCtx,
const DiskLoc& bucketLoc,
const KeyDataType& key,
int* posOut,
@@ -405,28 +407,28 @@ private:
const DiskLoc& recordLoc,
const int direction) const;
- long long _fullValidate(OperationContext* txn,
+ long long _fullValidate(OperationContext* opCtx,
const DiskLoc bucketLoc,
long long* unusedCount,
bool strict,
bool dumpBuckets,
unsigned depth) const;
- DiskLoc _addBucket(OperationContext* txn);
+ DiskLoc _addBucket(OperationContext* opCtx);
- bool canMergeChildren(OperationContext* txn,
+ bool canMergeChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
const int leftIndex);
// has to look in children of 'bucket' and requires record store
- int _rebalancedSeparatorPos(OperationContext* txn, BucketType* bucket, int leftIndex);
+ int _rebalancedSeparatorPos(OperationContext* opCtx, BucketType* bucket, int leftIndex);
void _packReadyForMod(BucketType* bucket, int& refPos);
void truncateTo(BucketType* bucket, int N, int& refPos);
- void split(OperationContext* txn,
+ void split(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int keypos,
@@ -435,7 +437,7 @@ private:
const DiskLoc lchild,
const DiskLoc rchild);
- Status _insert(OperationContext* txn,
+ Status _insert(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
const KeyDataType& key,
@@ -445,7 +447,7 @@ private:
const DiskLoc rightChild);
// TODO take a BucketType*?
- void insertHere(OperationContext* txn,
+ void insertHere(OperationContext* opCtx,
const DiskLoc bucketLoc,
int pos,
const KeyDataType& key,
@@ -455,7 +457,7 @@ private:
std::string dupKeyError(const KeyDataType& key) const;
- void setInternalKey(OperationContext* txn,
+ void setInternalKey(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int keypos,
@@ -470,16 +472,16 @@ private:
int firstIndex = 0,
int lastIndex = -1);
- bool mayBalanceWithNeighbors(OperationContext* txn,
+ bool mayBalanceWithNeighbors(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc);
- void doBalanceChildren(OperationContext* txn,
+ void doBalanceChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex);
- void doBalanceLeftToRight(OperationContext* txn,
+ void doBalanceLeftToRight(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc thisLoc,
int leftIndex,
@@ -489,7 +491,7 @@ private:
BucketType* r,
const DiskLoc rchild);
- void doBalanceRightToLeft(OperationContext* txn,
+ void doBalanceRightToLeft(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc thisLoc,
int leftIndex,
@@ -499,30 +501,30 @@ private:
BucketType* r,
const DiskLoc rchild);
- bool tryBalanceChildren(OperationContext* txn,
+ bool tryBalanceChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex);
- int indexInParent(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc) const;
+ int indexInParent(OperationContext* opCtx, BucketType* bucket, const DiskLoc bucketLoc) const;
- void doMergeChildren(OperationContext* txn,
+ void doMergeChildren(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int leftIndex);
- void replaceWithNextChild(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc);
+ void replaceWithNextChild(OperationContext* opCtx, BucketType* bucket, const DiskLoc bucketLoc);
- void deleteInternalKey(OperationContext* txn,
+ void deleteInternalKey(OperationContext* opCtx,
BucketType* bucket,
const DiskLoc bucketLoc,
int keypos);
- void delKeyAtPos(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc, int p);
+ void delKeyAtPos(OperationContext* opCtx, BucketType* bucket, const DiskLoc bucketLoc, int p);
- void delBucket(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc);
+ void delBucket(OperationContext* opCtx, BucketType* bucket, const DiskLoc bucketLoc);
- void deallocBucket(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc);
+ void deallocBucket(OperationContext* opCtx, BucketType* bucket, const DiskLoc bucketLoc);
bool _keyIsAt(const BSONObj& savedKey,
const DiskLoc& savedLoc,
@@ -543,18 +545,18 @@ private:
const DiskLoc prevChild);
- BucketType* childForPos(OperationContext* txn, BucketType* bucket, int pos) const;
+ BucketType* childForPos(OperationContext* opCtx, BucketType* bucket, int pos) const;
- BucketType* getBucket(OperationContext* txn, const DiskLoc dl) const {
- return getBucket(txn, dl.toRecordId());
+ BucketType* getBucket(OperationContext* opCtx, const DiskLoc dl) const {
+ return getBucket(opCtx, dl.toRecordId());
}
- BucketType* getBucket(OperationContext* txn, const RecordId dl) const;
+ BucketType* getBucket(OperationContext* opCtx, const RecordId dl) const;
- BucketType* getRoot(OperationContext* txn) const;
+ BucketType* getRoot(OperationContext* opCtx) const;
- DiskLoc getRootLoc(OperationContext* txn) const;
+ DiskLoc getRootLoc(OperationContext* opCtx) const;
- void recordRandomWalk(OperationContext* txn,
+ void recordRandomWalk(OperationContext* opCtx,
PseudoRandom* prng,
BucketType* curBucket,
int64_t nBucketsInCurrentLevel,
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
index 5d24c5b5713..e34a5c5a22e 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
@@ -63,18 +63,18 @@ public:
protected:
void checkValidNumKeys(int nKeys) {
- OperationContextNoop txn;
- ASSERT_EQUALS(nKeys, _helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ OperationContextNoop opCtx;
+ ASSERT_EQUALS(nKeys, _helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
}
Status insert(const BSONObj& key, const DiskLoc dl, bool dupsAllowed = true) {
- OperationContextNoop txn;
- return _helper.btree.insert(&txn, key, dl, dupsAllowed);
+ OperationContextNoop opCtx;
+ return _helper.btree.insert(&opCtx, key, dl, dupsAllowed);
}
bool unindex(const BSONObj& key) {
- OperationContextNoop txn;
- return _helper.btree.unindex(&txn, key, _helper.dummyDiskLoc);
+ OperationContextNoop opCtx;
+ return _helper.btree.unindex(&opCtx, key, _helper.dummyDiskLoc);
}
void locate(const BSONObj& key,
@@ -92,9 +92,10 @@ protected:
int direction) {
int pos;
DiskLoc loc;
- OperationContextNoop txn;
- ASSERT_EQUALS(expectedFound,
- _helper.btree.locate(&txn, key, _helper.dummyDiskLoc, direction, &pos, &loc));
+ OperationContextNoop opCtx;
+ ASSERT_EQUALS(
+ expectedFound,
+ _helper.btree.locate(&opCtx, key, _helper.dummyDiskLoc, direction, &pos, &loc));
ASSERT_EQUALS(expectedLocation, loc);
ASSERT_EQUALS(expectedPos, pos);
}
@@ -116,8 +117,8 @@ protected:
}
BucketType* head() const {
- OperationContextNoop txn;
- return _helper.btree.getBucket(&txn, _helper.headManager.getHead(&txn));
+ OperationContextNoop opCtx;
+ return _helper.btree.getBucket(&opCtx, _helper.headManager.getHead(&opCtx));
}
void forcePackBucket(const RecordId bucketLoc) {
@@ -138,8 +139,8 @@ protected:
int bucketRebalancedSeparatorPos(const RecordId bucketLoc, int leftIndex) {
BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
- OperationContextNoop txn;
- return _helper.btree._rebalancedSeparatorPos(&txn, bucket, leftIndex);
+ OperationContextNoop opCtx;
+ return _helper.btree._rebalancedSeparatorPos(&opCtx, bucket, leftIndex);
}
FullKey getKey(const RecordId bucketLoc, int pos) const {
@@ -155,20 +156,20 @@ protected:
}
DiskLoc newBucket() {
- OperationContextNoop txn;
- return _helper.btree._addBucket(&txn);
+ OperationContextNoop opCtx;
+ return _helper.btree._addBucket(&opCtx);
}
/**
* Sets the nextChild pointer for the bucket at the specified location.
*/
void setBucketNextChild(const DiskLoc bucketLoc, const DiskLoc nextChild) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
- BucketType* bucket = _helper.btree.getBucket(&txn, bucketLoc);
+ BucketType* bucket = _helper.btree.getBucket(&opCtx, bucketLoc);
bucket->nextChild = nextChild;
- _helper.btree.fixParentPtrs(&txn, bucket, bucketLoc);
+ _helper.btree.fixParentPtrs(&opCtx, bucket, bucketLoc);
}
protected:
@@ -183,8 +184,8 @@ template <class OnDiskFormat>
class SimpleCreate : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
this->checkValidNumKeys(0);
}
@@ -194,14 +195,14 @@ template <class OnDiskFormat>
class SimpleInsertDelete : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
BSONObj key = simpleKey('z');
this->insert(key, this->_helper.dummyDiskLoc);
this->checkValidNumKeys(1);
- this->locate(key, 0, true, this->_helper.headManager.getHead(&txn), 1);
+ this->locate(key, 0, true, this->_helper.headManager.getHead(&opCtx), 1);
this->unindex(key);
@@ -214,8 +215,8 @@ template <class OnDiskFormat>
class SplitUnevenBucketBase : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
for (int i = 0; i < 10; ++i) {
BSONObj shortKey = simpleKey(shortToken(i), 1);
@@ -278,17 +279,17 @@ template <class OnDiskFormat>
class MissingLocate : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
for (int i = 0; i < 3; ++i) {
BSONObj k = simpleKey('b' + 2 * i);
this->insert(k, this->_helper.dummyDiskLoc);
}
- locateExtended(1, 'a', 'b', this->_helper.headManager.getHead(&txn));
- locateExtended(1, 'c', 'd', this->_helper.headManager.getHead(&txn));
- locateExtended(1, 'e', 'f', this->_helper.headManager.getHead(&txn));
+ locateExtended(1, 'a', 'b', this->_helper.headManager.getHead(&opCtx));
+ locateExtended(1, 'c', 'd', this->_helper.headManager.getHead(&opCtx));
+ locateExtended(1, 'e', 'f', this->_helper.headManager.getHead(&opCtx));
locateExtended(1, 'g', 'g' + 1, RecordId()); // of course, 'h' isn't in the index.
// old behavior
@@ -298,9 +299,9 @@ public:
// locateExtended( -1, 'g', 'f', dl() );
locateExtended(-1, 'a', 'a' - 1, RecordId()); // of course, 'a' - 1 isn't in the index
- locateExtended(-1, 'c', 'b', this->_helper.headManager.getHead(&txn));
- locateExtended(-1, 'e', 'd', this->_helper.headManager.getHead(&txn));
- locateExtended(-1, 'g', 'f', this->_helper.headManager.getHead(&txn));
+ locateExtended(-1, 'c', 'b', this->_helper.headManager.getHead(&opCtx));
+ locateExtended(-1, 'e', 'd', this->_helper.headManager.getHead(&opCtx));
+ locateExtended(-1, 'g', 'f', this->_helper.headManager.getHead(&opCtx));
}
private:
@@ -316,8 +317,8 @@ template <class OnDiskFormat>
class MissingLocateMultiBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc);
this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc);
@@ -337,23 +338,23 @@ public:
// 'E' is the split point and should be in the head the rest should be ~50/50
const BSONObj splitPoint = simpleKey('E', 800);
- this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
- ASSERT_EQUALS(this->_helper.headManager.getHead(&txn), loc.toRecordId());
+ this->_helper.btree.locate(&opCtx, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
+ ASSERT_EQUALS(this->_helper.headManager.getHead(&opCtx), loc.toRecordId());
ASSERT_EQUALS(0, pos);
// Find the one before 'E'
int largePos;
DiskLoc largeLoc;
this->_helper.btree.locate(
- &txn, splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
- this->_helper.btree.advance(&txn, &largeLoc, &largePos, -1);
+ &opCtx, splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
+ this->_helper.btree.advance(&opCtx, &largeLoc, &largePos, -1);
// Find the one after 'E'
int smallPos;
DiskLoc smallLoc;
this->_helper.btree.locate(
- &txn, splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
- this->_helper.btree.advance(&txn, &smallLoc, &smallPos, 1);
+ &opCtx, splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
+ this->_helper.btree.advance(&opCtx, &smallLoc, &smallPos, 1);
ASSERT_NOT_EQUALS(smallLoc, largeLoc);
ASSERT_NOT_EQUALS(smallLoc, loc);
@@ -368,8 +369,8 @@ template <class OnDiskFormat>
class SERVER983 : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc);
this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc);
@@ -389,23 +390,23 @@ public:
// 'H' is the maximum 'large' interval key, 90% should be < 'H' and 10% larger
const BSONObj splitPoint = simpleKey('H', 800);
- this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
- ASSERT_EQUALS(this->_helper.headManager.getHead(&txn), loc.toRecordId());
+ this->_helper.btree.locate(&opCtx, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
+ ASSERT_EQUALS(this->_helper.headManager.getHead(&opCtx), loc.toRecordId());
ASSERT_EQUALS(0, pos);
// Find the one before 'H'
int largePos;
DiskLoc largeLoc;
this->_helper.btree.locate(
- &txn, splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
- this->_helper.btree.advance(&txn, &largeLoc, &largePos, -1);
+ &opCtx, splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
+ this->_helper.btree.advance(&opCtx, &largeLoc, &largePos, -1);
// Find the one after 'H'
int smallPos;
DiskLoc smallLoc;
this->_helper.btree.locate(
- &txn, splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
- this->_helper.btree.advance(&txn, &smallLoc, &smallPos, 1);
+ &opCtx, splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
+ this->_helper.btree.advance(&opCtx, &smallLoc, &smallPos, 1);
ASSERT_NOT_EQUALS(smallLoc, largeLoc);
ASSERT_NOT_EQUALS(smallLoc, loc);
@@ -417,8 +418,8 @@ template <class OnDiskFormat>
class DontReuseUnused : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
for (int i = 0; i < 10; ++i) {
const BSONObj k = simpleKey('b' + 2 * i, 800);
@@ -437,8 +438,8 @@ template <class OnDiskFormat>
class MergeBucketsTestBase : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
for (int i = 0; i < 10; ++i) {
const BSONObj k = simpleKey('b' + 2 * i, 800);
@@ -453,7 +454,7 @@ public:
long long unusedCount = 0;
ASSERT_EQUALS(expectedCount,
- this->_helper.btree.fullValidate(&txn, &unusedCount, true, false, 0));
+ this->_helper.btree.fullValidate(&opCtx, &unusedCount, true, false, 0));
ASSERT_EQUALS(0, unusedCount);
}
@@ -493,8 +494,8 @@ template <class OnDiskFormat>
class MergeBucketsDontReplaceHead : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
for (int i = 0; i < 18; ++i) {
const BSONObj k = simpleKey('a' + i, 800);
@@ -509,7 +510,7 @@ public:
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL) - 1);
long long unusedCount = 0;
- ASSERT_EQUALS(17, this->_helper.btree.fullValidate(&txn, &unusedCount, true, false, 0));
+ ASSERT_EQUALS(17, this->_helper.btree.fullValidate(&opCtx, &unusedCount, true, false, 0));
ASSERT_EQUALS(0, unusedCount);
}
};
@@ -518,11 +519,11 @@ template <class OnDiskFormat>
class MergeBucketsDelInternal : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{d:{b:{a:null},bb:null,_:{c:null}},_:{f:{e:null},_:{g:null}}}");
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -531,7 +532,7 @@ public:
<< "bb");
verify(this->unindex(k));
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
@@ -544,11 +545,11 @@ template <class OnDiskFormat>
class MergeBucketsRightNull : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{d:{b:{a:null},bb:null,cc:{c:null}},_:{f:{e:null},h:{g:null}}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -557,7 +558,7 @@ public:
<< "bb");
verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
@@ -573,12 +574,12 @@ template <class OnDiskFormat>
class DontMergeSingleBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{d:{b:{a:null},c:null}}");
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -587,7 +588,7 @@ public:
<< "c");
verify(this->unindex(k));
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -600,12 +601,12 @@ template <class OnDiskFormat>
class ParentMergeNonRightToLeft : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{d:{b:{a:null},bb:null,cc:{c:null}},i:{f:{e:null},h:{g:null}}}");
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -614,7 +615,7 @@ public:
<< "bb");
verify(this->unindex(k));
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// Child does not currently replace parent in this case. Also, the tree
// has 6 buckets + 1 for the this->_helper.dummyDiskLoc.
@@ -628,12 +629,12 @@ template <class OnDiskFormat>
class ParentMergeNonRightToRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{d:{b:{a:null},cc:{c:null}},i:{f:{e:null},ff:null,h:{g:null}}}");
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -642,7 +643,7 @@ public:
<< "ff");
verify(this->unindex(k));
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// Child does not currently replace parent in this case. Also, the tree
// has 6 buckets + 1 for the this->_helper.dummyDiskLoc.
@@ -656,15 +657,15 @@ template <class OnDiskFormat>
class CantMergeRightNoMerge : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{d:{b:{a:null},bb:null,cc:{c:null}},"
"dd:null,"
"_:{f:{e:null},h:{g:null}}}");
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -673,7 +674,7 @@ public:
<< "bb");
verify(this->unindex(k));
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -689,12 +690,12 @@ template <class OnDiskFormat>
class CantMergeLeftNoMerge : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{c:{b:{a:null}},d:null,_:{f:{e:null},g:null}}");
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
@@ -703,7 +704,7 @@ public:
<< "g");
verify(this->unindex(k));
- ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
@@ -716,12 +717,12 @@ template <class OnDiskFormat>
class MergeOption : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{c:{b:{a:null}},f:{e:{d:null},ee:null},_:{h:{g:null}}}");
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -730,7 +731,7 @@ public:
<< "ee");
verify(this->unindex(k));
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -743,12 +744,12 @@ template <class OnDiskFormat>
class ForceMergeLeft : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{c:{b:{a:null}},f:{e:{d:null},ee:null},ff:null,_:{h:{g:null}}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -757,7 +758,7 @@ public:
<< "ee");
verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -770,12 +771,12 @@ template <class OnDiskFormat>
class ForceMergeRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{c:{b:{a:null}},cc:null,f:{e:{d:null},ee:null},_:{h:{g:null}}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
@@ -784,7 +785,7 @@ public:
<< "ee");
verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -797,12 +798,12 @@ template <class OnDiskFormat>
class RecursiveMerge : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},j:{i:null}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -811,7 +812,7 @@ public:
<< "c");
verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -825,12 +826,12 @@ template <class OnDiskFormat>
class RecursiveMergeRightBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},_:{i:null}}");
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -839,7 +840,7 @@ public:
<< "c");
verify(this->unindex(k));
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -852,12 +853,12 @@ template <class OnDiskFormat>
class RecursiveMergeDoubleRightBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},_:{f:null}},_:{i:null}}");
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -866,7 +867,7 @@ public:
<< "c");
verify(this->unindex(k));
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -882,20 +883,20 @@ public:
MergeSizeTestBase() : _count(0) {}
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
const BSONObj& topKey = biggestKey('m');
DiskLoc leftChild = this->newBucket();
builder.push(
- DiskLoc::fromRecordId(this->_helper.headManager.getHead(&txn)), topKey, leftChild);
+ DiskLoc::fromRecordId(this->_helper.headManager.getHead(&opCtx)), topKey, leftChild);
_count++;
DiskLoc rightChild = this->newBucket();
- this->setBucketNextChild(DiskLoc::fromRecordId(this->_helper.headManager.getHead(&txn)),
+ this->setBucketNextChild(DiskLoc::fromRecordId(this->_helper.headManager.getHead(&opCtx)),
rightChild);
_count += builder.fillBucketToExactSize(leftChild, leftSize(), 'a');
@@ -924,7 +925,8 @@ public:
const char* keys = delKeys();
for (const char* i = keys; *i; ++i) {
long long unused = 0;
- ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(_count,
+ this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
ASSERT_EQUALS(0, unused);
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
@@ -937,7 +939,7 @@ public:
}
long long unused = 0;
- ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
ASSERT_EQUALS(0, unused);
validate();
@@ -1185,14 +1187,14 @@ protected:
}
virtual void initCheck() {
- OperationContextNoop txn;
- _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ OperationContextNoop opCtx;
+ _oldTop = this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson();
}
virtual void validate() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
ASSERT_BSONOBJ_NE(_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson());
}
private:
@@ -1212,14 +1214,14 @@ protected:
}
virtual void initCheck() {
- OperationContextNoop txn;
- _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ OperationContextNoop opCtx;
+ _oldTop = this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson();
}
virtual void validate() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
ASSERT_BSONOBJ_NE(_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson());
}
private:
@@ -1230,15 +1232,15 @@ template <class OnDiskFormat>
class BalanceOneLeftToRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
"b:{$20:null,$30:null,$40:null,$50:null,a:null},"
"_:{c:null}}");
- ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1246,7 +1248,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x40, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1262,15 +1264,15 @@ template <class OnDiskFormat>
class BalanceOneRightToLeft : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$1:null,$2:null,$3:null,$4:null},"
"b:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},"
"_:{c:null}}");
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1278,7 +1280,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x3, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1294,8 +1296,8 @@ template <class OnDiskFormat>
class BalanceThreeLeftToRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$20:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},"
@@ -1303,7 +1305,7 @@ public:
"b:{$30:null,$40:{$35:null},$50:{$45:null}},"
"_:{c:null}}");
- ASSERT_EQUALS(23, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(23, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 14 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(15, this->_helper.recordStore.numRecords(NULL));
@@ -1311,7 +1313,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x30, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 14 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(15, this->_helper.recordStore.numRecords(NULL));
@@ -1329,8 +1331,8 @@ template <class OnDiskFormat>
class BalanceThreeRightToLeft : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$20:{$1:{$0:null},$3:{$2:null},$5:null,_:{$14:null}},"
@@ -1339,7 +1341,7 @@ public:
"$90:{$85:null},$100:{$95:null}},"
"_:{c:null}}");
- ASSERT_EQUALS(25, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(25, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 15 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(16, this->_helper.recordStore.numRecords(NULL));
@@ -1347,7 +1349,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x5, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(24, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(24, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 15 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(16, this->_helper.recordStore.numRecords(NULL));
@@ -1365,14 +1367,14 @@ template <class OnDiskFormat>
class BalanceSingleParentKey : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
"_:{$20:null,$30:null,$40:null,$50:null,a:null}}");
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -1380,7 +1382,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x40, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -1395,8 +1397,8 @@ template <class OnDiskFormat>
class PackEmptyBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null}");
@@ -1404,7 +1406,7 @@ public:
<< "a");
ASSERT(this->unindex(k));
- this->forcePackBucket(this->_helper.headManager.getHead(&txn));
+ this->forcePackBucket(this->_helper.headManager.getHead(&opCtx));
typename BtreeLogicTestBase<OnDiskFormat>::BucketType* headBucket = this->head();
@@ -1425,8 +1427,8 @@ template <class OnDiskFormat>
class PackedDataSizeEmptyBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null}");
@@ -1434,7 +1436,7 @@ public:
<< "a");
ASSERT(this->unindex(k));
- this->forcePackBucket(this->_helper.headManager.getHead(&txn));
+ this->forcePackBucket(this->_helper.headManager.getHead(&opCtx));
typename BtreeLogicTestBase<OnDiskFormat>::BucketType* headBucket = this->head();
@@ -1449,25 +1451,25 @@ template <class OnDiskFormat>
class BalanceSingleParentKeyPackParent : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
"_:{$20:null,$30:null,$40:null,$50:null,a:null}}");
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
// force parent pack
- this->forcePackBucket(this->_helper.headManager.getHead(&txn));
+ this->forcePackBucket(this->_helper.headManager.getHead(&opCtx));
const BSONObj k = BSON("" << bigNumString(0x40, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -1482,8 +1484,8 @@ template <class OnDiskFormat>
class BalanceSplitParent : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10$10:{$1:null,$2:null,$3:null,$4:null},"
@@ -1491,7 +1493,7 @@ public:
"$200:null,$300:null,$400:null,$500:null,$600:null,"
"$700:null,$800:null,$900:null,_:{c:null}}");
- ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1499,7 +1501,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x3, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(21, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(21, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
@@ -1516,15 +1518,15 @@ template <class OnDiskFormat>
class RebalancedSeparatorBase : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(treeSpec());
modTree();
ASSERT_EQUALS(
expectedSeparator(),
- this->bucketRebalancedSeparatorPos(this->_helper.headManager.getHead(&txn), 0));
+ this->bucketRebalancedSeparatorPos(this->_helper.headManager.getHead(&opCtx), 0));
}
virtual string treeSpec() const = 0;
@@ -1658,14 +1660,14 @@ class NoMoveAtLowWaterMarkRight : public MergeSizeJustRightRight<OnDiskFormat> {
}
virtual void initCheck() {
- OperationContextNoop txn;
- _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ OperationContextNoop opCtx;
+ _oldTop = this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson();
}
virtual void validate() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
ASSERT_BSONOBJ_EQ(_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson());
}
virtual bool merge() const {
@@ -1686,10 +1688,10 @@ class MoveBelowLowWaterMarkRight : public NoMoveAtLowWaterMarkRight<OnDiskFormat
}
virtual void validate() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
// Different top means we rebalanced
ASSERT_BSONOBJ_NE(this->_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson());
}
};
@@ -1699,14 +1701,14 @@ class NoMoveAtLowWaterMarkLeft : public MergeSizeJustRightLeft<OnDiskFormat> {
return MergeSizeJustRightLeft<OnDiskFormat>::leftSize() + 1;
}
virtual void initCheck() {
- OperationContextNoop txn;
- this->_oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ OperationContextNoop opCtx;
+ this->_oldTop = this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson();
}
virtual void validate() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
ASSERT_BSONOBJ_EQ(this->_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson());
}
virtual bool merge() const {
return false;
@@ -1726,10 +1728,10 @@ class MoveBelowLowWaterMarkLeft : public NoMoveAtLowWaterMarkLeft<OnDiskFormat>
}
virtual void validate() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
// Different top means we rebalanced
ASSERT_BSONOBJ_NE(this->_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 0).data.toBson());
}
};
@@ -1737,15 +1739,15 @@ template <class OnDiskFormat>
class PreferBalanceLeft : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
"$20:{$11:null,$12:null,$13:null,$14:null},"
"_:{$30:null}}");
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1753,7 +1755,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x12, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1769,15 +1771,15 @@ template <class OnDiskFormat>
class PreferBalanceRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$1:null},"
"$20:{$11:null,$12:null,$13:null,$14:null},"
"_:{$31:null,$32:null,$33:null,$34:null,$35:null,$36:null}}");
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1785,7 +1787,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x12, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1801,15 +1803,15 @@ template <class OnDiskFormat>
class RecursiveMergeThenBalance : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:{$5:{$1:null,$2:null},$8:{$6:null,$7:null}},"
"_:{$20:null,$30:null,$40:null,$50:null,"
"$60:null,$70:null,$80:null,$90:null}}");
- ASSERT_EQUALS(15, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(15, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
@@ -1817,7 +1819,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x7, 800));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -1832,12 +1834,12 @@ template <class OnDiskFormat>
class DelEmptyNoNeighbors : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{b:{a:null}}");
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -1846,7 +1848,7 @@ public:
<< "a");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
@@ -1859,12 +1861,12 @@ template <class OnDiskFormat>
class DelEmptyEmptyNeighbors : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,c:{b:null},d:null}");
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -1873,7 +1875,7 @@ public:
<< "b");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&opCtx, NULL, true, false, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
@@ -1886,13 +1888,13 @@ template <class OnDiskFormat>
class DelInternal : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,c:{b:null},d:null}");
long long unused = 0;
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -1902,7 +1904,7 @@ public:
<< "c");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
@@ -1916,17 +1918,17 @@ template <class OnDiskFormat>
class DelInternalReplaceWithUnused : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,c:{b:null},d:null}");
const DiskLoc prevChildBucket =
- this->getKey(this->_helper.headManager.getHead(&txn), 1).prevChildBucket;
+ this->getKey(this->_helper.headManager.getHead(&opCtx), 1).prevChildBucket;
this->markKeyUnused(prevChildBucket, 0);
long long unused = 0;
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -1937,7 +1939,7 @@ public:
ASSERT(this->unindex(k));
unused = 0;
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
@@ -1952,13 +1954,13 @@ template <class OnDiskFormat>
class DelInternalReplaceRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,_:{b:null}}");
long long unused = 0;
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -1969,7 +1971,7 @@ public:
ASSERT(this->unindex(k));
unused = 0;
- ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
@@ -1983,13 +1985,13 @@ template <class OnDiskFormat>
class DelInternalPromoteKey : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,y:{d:{c:{b:null}},_:{e:null}},z:null}");
long long unused = 0;
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
@@ -2000,7 +2002,7 @@ public:
ASSERT(this->unindex(k));
unused = 0;
- ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -2014,13 +2016,13 @@ template <class OnDiskFormat>
class DelInternalPromoteRightKey : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,_:{e:{c:null},_:{f:null}}}");
long long unused = 0;
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -2031,7 +2033,7 @@ public:
ASSERT(this->unindex(k));
unused = 0;
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -2045,13 +2047,13 @@ template <class OnDiskFormat>
class DelInternalReplacementPrevNonNull : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,d:{c:{b:null}},e:null}");
long long unused = 0;
- ASSERT_EQUALS(5, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(5, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -2061,7 +2063,7 @@ public:
<< "d");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -2070,7 +2072,7 @@ public:
builder.checkStructure("{a:null,d:{c:{b:null}},e:null}");
// Check 'unused' key
- ASSERT(this->getKey(this->_helper.headManager.getHead(&txn), 1).recordLoc.getOfs() & 1);
+ ASSERT(this->getKey(this->_helper.headManager.getHead(&opCtx), 1).recordLoc.getOfs() & 1);
}
};
@@ -2078,13 +2080,13 @@ template <class OnDiskFormat>
class DelInternalReplacementNextNonNull : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree("{a:null,_:{c:null,_:{d:null}}}");
long long unused = 0;
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -2094,7 +2096,7 @@ public:
<< "a");
ASSERT(this->unindex(k));
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
@@ -2103,7 +2105,7 @@ public:
builder.checkStructure("{a:null,_:{c:null,_:{d:null}}}");
// Check 'unused' key
- ASSERT(this->getKey(this->_helper.headManager.getHead(&txn), 0).recordLoc.getOfs() & 1);
+ ASSERT(this->getKey(this->_helper.headManager.getHead(&opCtx), 0).recordLoc.getOfs() & 1);
}
};
@@ -2111,15 +2113,15 @@ template <class OnDiskFormat>
class DelInternalSplitPromoteLeft : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:null,$20:null,$30$10:{$25:{$23:null},_:{$27:null}},"
"$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100:null}");
long long unused = 0;
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -2128,7 +2130,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x30, 0x10));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -2145,15 +2147,15 @@ template <class OnDiskFormat>
class DelInternalSplitPromoteRight : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ OperationContextNoop opCtx;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
builder.makeTree(
"{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,"
"$80:null,$90:null,$100$10:{$95:{$93:null},_:{$97:null}}}");
long long unused = 0;
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -2162,7 +2164,7 @@ public:
const BSONObj k = BSON("" << bigNumString(0x100, 0x10));
ASSERT(this->unindex(k));
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&opCtx, &unused, true, false, 0));
// The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
@@ -2178,8 +2180,8 @@ template <class OnDiskFormat>
class LocateEmptyForward : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
BSONObj key1 = simpleKey('a');
this->insert(key1, this->_helper.dummyDiskLoc);
@@ -2189,7 +2191,7 @@ public:
this->insert(key3, this->_helper.dummyDiskLoc);
this->checkValidNumKeys(3);
- this->locate(BSONObj(), 0, false, this->_helper.headManager.getHead(&txn), 1);
+ this->locate(BSONObj(), 0, false, this->_helper.headManager.getHead(&opCtx), 1);
}
};
@@ -2197,8 +2199,8 @@ template <class OnDiskFormat>
class LocateEmptyReverse : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
BSONObj key1 = simpleKey('a');
this->insert(key1, this->_helper.dummyDiskLoc);
@@ -2216,27 +2218,27 @@ template <class OnDiskFormat>
class DuplicateKeys : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ OperationContextNoop opCtx;
+ this->_helper.btree.initAsEmpty(&opCtx);
BSONObj key1 = simpleKey('z');
ASSERT_OK(this->insert(key1, this->_helper.dummyDiskLoc, true));
this->checkValidNumKeys(1);
- this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
+ this->locate(key1, 0, true, this->_helper.headManager.getHead(&opCtx), 1);
// Attempt to insert a dup key/value, which is okay.
ASSERT_EQUALS(Status::OK(), this->insert(key1, this->_helper.dummyDiskLoc, true));
this->checkValidNumKeys(1);
- this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
+ this->locate(key1, 0, true, this->_helper.headManager.getHead(&opCtx), 1);
// Attempt to insert a dup key/value with dupsAllowed=false.
ASSERT_EQUALS(ErrorCodes::DuplicateKeyValue,
this->insert(key1, this->_helper.dummyDiskLoc, false));
this->checkValidNumKeys(1);
- this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
+ this->locate(key1, 0, true, this->_helper.headManager.getHead(&opCtx), 1);
// Add another record to produce another diskloc.
- StatusWith<RecordId> s = this->_helper.recordStore.insertRecord(&txn, "a", 1, false);
+ StatusWith<RecordId> s = this->_helper.recordStore.insertRecord(&opCtx, "a", 1, false);
ASSERT_TRUE(s.isOK());
ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
@@ -2252,7 +2254,7 @@ public:
this->checkValidNumKeys(2);
// Clean up.
- this->_helper.recordStore.deleteRecord(&txn, s.getValue());
+ this->_helper.recordStore.deleteRecord(&opCtx, s.getValue());
ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
}
};
@@ -2330,14 +2332,14 @@ public:
}
long long unused = 0;
- ASSERT_EQUALS( 0, bt()->fullValidate(&txn, dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, bt()->fullValidate(&opCtx, dl(), order(), &unused, true ) );
for ( long long i = 50000; i < 50100; ++i ) {
insert( i );
}
long long unused2 = 0;
- ASSERT_EQUALS( 100, bt()->fullValidate(&txn, dl(), order(), &unused2, true ) );
+ ASSERT_EQUALS( 100, bt()->fullValidate(&opCtx, dl(), order(), &unused2, true ) );
// log() << "old unused: " << unused << ", new unused: " << unused2 << endl;
//
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp
index 663075e5cb8..e02a01923f7 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp
@@ -73,9 +73,9 @@ BtreeLogicTestHelper<OnDiskFormat>::BtreeLogicTestHelper(const BSONObj& order)
// Generate a valid record location for a "fake" record, which we will repeatedly use
// thoughout the tests.
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
StatusWith<RecordId> s =
- recordStore.insertRecord(&txn, randomData.c_str(), randomData.length(), false);
+ recordStore.insertRecord(&opCtx, randomData.c_str(), randomData.length(), false);
ASSERT_TRUE(s.isOK());
ASSERT_EQUALS(1, recordStore.numRecords(NULL));
@@ -90,13 +90,13 @@ BtreeLogicTestHelper<OnDiskFormat>::BtreeLogicTestHelper(const BSONObj& order)
template <class OnDiskFormat>
void ArtificialTreeBuilder<OnDiskFormat>::makeTree(const string& spec) {
- _helper->headManager.setHead(_txn, makeTree(fromjson(spec)).toRecordId());
+ _helper->headManager.setHead(_opCtx, makeTree(fromjson(spec)).toRecordId());
}
template <class OnDiskFormat>
DiskLoc ArtificialTreeBuilder<OnDiskFormat>::makeTree(const BSONObj& spec) {
- DiskLoc bucketLoc = _helper->btree._addBucket(_txn);
- BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
+ DiskLoc bucketLoc = _helper->btree._addBucket(_opCtx);
+ BucketType* bucket = _helper->btree.getBucket(_opCtx, bucketLoc);
BSONObjIterator i(spec);
while (i.more()) {
@@ -114,13 +114,13 @@ DiskLoc ArtificialTreeBuilder<OnDiskFormat>::makeTree(const BSONObj& spec) {
}
}
- _helper->btree.fixParentPtrs(_txn, bucket, bucketLoc);
+ _helper->btree.fixParentPtrs(_opCtx, bucket, bucketLoc);
return bucketLoc;
}
template <class OnDiskFormat>
void ArtificialTreeBuilder<OnDiskFormat>::checkStructure(const string& spec) const {
- checkStructure(fromjson(spec), DiskLoc::fromRecordId(_helper->headManager.getHead(_txn)));
+ checkStructure(fromjson(spec), DiskLoc::fromRecordId(_helper->headManager.getHead(_opCtx)));
}
template <class OnDiskFormat>
@@ -128,16 +128,16 @@ void ArtificialTreeBuilder<OnDiskFormat>::push(const DiskLoc bucketLoc,
const BSONObj& key,
const DiskLoc child) {
KeyDataOwnedType k(key);
- BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
+ BucketType* bucket = _helper->btree.getBucket(_opCtx, bucketLoc);
invariant(_helper->btree.pushBack(bucket, _helper->dummyDiskLoc, k, child));
- _helper->btree.fixParentPtrs(_txn, bucket, bucketLoc);
+ _helper->btree.fixParentPtrs(_opCtx, bucket, bucketLoc);
}
template <class OnDiskFormat>
void ArtificialTreeBuilder<OnDiskFormat>::checkStructure(const BSONObj& spec,
const DiskLoc node) const {
- BucketType* bucket = _helper->btree.getBucket(_txn, node);
+ BucketType* bucket = _helper->btree.getBucket(_opCtx, node);
BSONObjIterator j(spec);
for (int i = 0; i < bucket->n; ++i) {
@@ -172,8 +172,8 @@ template <class OnDiskFormat>
bool ArtificialTreeBuilder<OnDiskFormat>::isPresent(const BSONObj& key, int direction) const {
int pos;
DiskLoc loc;
- OperationContextNoop txn;
- return _helper->btree.locate(&txn, key, _helper->dummyDiskLoc, direction, &pos, &loc);
+ OperationContextNoop opCtx;
+ return _helper->btree.locate(&opCtx, key, _helper->dummyDiskLoc, direction, &pos, &loc);
}
// Static
@@ -200,7 +200,7 @@ int ArtificialTreeBuilder<OnDiskFormat>::fillBucketToExactSize(const DiskLoc buc
char startKey) {
ASSERT_FALSE(bucketLoc.isNull());
- BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
+ BucketType* bucket = _helper->btree.getBucket(_opCtx, bucketLoc);
ASSERT_EQUALS(0, bucket->n);
static const int bigSize = KeyDataOwnedType(simpleKey('a', 801)).dataSize();
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h
index 5aeec516528..c5d48b48b3a 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h
@@ -53,11 +53,11 @@ BSONObj simpleKey(char c, int n = 1);
*/
class TestHeadManager : public HeadManager {
public:
- virtual const RecordId getHead(OperationContext* txn) const {
+ virtual const RecordId getHead(OperationContext* opCtx) const {
return _head;
}
- virtual void setHead(OperationContext* txn, const RecordId newHead) {
+ virtual void setHead(OperationContext* opCtx, const RecordId newHead) {
_head = newHead;
}
@@ -100,8 +100,8 @@ public:
* does not do any cleanup, so constructing multiple trees over the same helper will
* cause leaked records.
*/
- ArtificialTreeBuilder(OperationContext* txn, BtreeLogicTestHelper<OnDiskFormat>* helper)
- : _txn(txn), _helper(helper) {}
+ ArtificialTreeBuilder(OperationContext* opCtx, BtreeLogicTestHelper<OnDiskFormat>* helper)
+ : _opCtx(opCtx), _helper(helper) {}
/**
* Causes the specified tree shape to be built on the associated helper and the tree's
@@ -143,7 +143,7 @@ private:
static std::string expectedKey(const char* spec);
- OperationContext* _txn;
+ OperationContext* _opCtx;
BtreeLogicTestHelper<OnDiskFormat>* _helper;
};
diff --git a/src/mongo/db/storage/mmap_v1/catalog/hashtab.h b/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
index e22453b99db..f873e6a4d3a 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
@@ -61,25 +61,25 @@ public:
return 0;
}
- void kill(OperationContext* txn, const Namespace& k) {
+ void kill(OperationContext* opCtx, const Namespace& k) {
bool found;
int i = _find(k, found);
if (i >= 0 && found) {
Node* n = &_nodes(i);
- n = txn->recoveryUnit()->writing(n);
+ n = opCtx->recoveryUnit()->writing(n);
n->key.kill();
n->setUnused();
}
}
/** returns false if too full */
- bool put(OperationContext* txn, const Namespace& k, const NamespaceDetails& value) {
+ bool put(OperationContext* opCtx, const Namespace& k, const NamespaceDetails& value) {
bool found;
int i = _find(k, found);
if (i < 0)
return false;
- Node* n = txn->recoveryUnit()->writing(&_nodes(i));
+ Node* n = opCtx->recoveryUnit()->writing(&_nodes(i));
if (!found) {
n->key = k;
n->hash = k.hash();
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
index d7f19c49f16..293fa482c41 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
@@ -82,13 +82,13 @@ NamespaceDetails::NamespaceDetails(const DiskLoc& loc, bool capped) {
memset(_reserved, 0, sizeof(_reserved));
}
-NamespaceDetails::Extra* NamespaceDetails::allocExtra(OperationContext* txn,
+NamespaceDetails::Extra* NamespaceDetails::allocExtra(OperationContext* opCtx,
StringData ns,
NamespaceIndex& ni,
int nindexessofar) {
// Namespace details must always be changed under an exclusive DB lock
const NamespaceString nss(ns);
- invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X));
int i = (nindexessofar - NIndexesBase) / NIndexesExtra;
verify(i >= 0 && i <= 1);
@@ -101,18 +101,18 @@ NamespaceDetails::Extra* NamespaceDetails::allocExtra(OperationContext* txn,
Extra temp;
temp.init();
- ni.add_ns(txn, extrans, reinterpret_cast<NamespaceDetails*>(&temp));
+ ni.add_ns(opCtx, extrans, reinterpret_cast<NamespaceDetails*>(&temp));
Extra* e = reinterpret_cast<NamespaceDetails::Extra*>(ni.details(extrans));
long ofs = e->ofsFrom(this);
if (i == 0) {
verify(_extraOffset == 0);
- *txn->recoveryUnit()->writing(&_extraOffset) = ofs;
+ *opCtx->recoveryUnit()->writing(&_extraOffset) = ofs;
verify(extra() == e);
} else {
Extra* hd = extra();
verify(hd->next(this) == 0);
- hd->setNext(txn, ofs);
+ hd->setNext(opCtx, ofs);
}
return e;
}
@@ -176,7 +176,7 @@ NamespaceDetails::IndexIterator::IndexIterator(const NamespaceDetails* _d,
}
// must be called when renaming a NS to fix up extra
-void NamespaceDetails::copyingFrom(OperationContext* txn,
+void NamespaceDetails::copyingFrom(OperationContext* opCtx,
StringData thisns,
NamespaceIndex& ni,
NamespaceDetails* src) {
@@ -184,35 +184,35 @@ void NamespaceDetails::copyingFrom(OperationContext* txn,
Extra* se = src->extra();
int n = NIndexesBase;
if (se) {
- Extra* e = allocExtra(txn, thisns, ni, n);
+ Extra* e = allocExtra(opCtx, thisns, ni, n);
while (1) {
n += NIndexesExtra;
e->copy(this, *se);
se = se->next(src);
if (se == 0)
break;
- Extra* nxt = allocExtra(txn, thisns, ni, n);
- e->setNext(txn, nxt->ofsFrom(this));
+ Extra* nxt = allocExtra(opCtx, thisns, ni, n);
+ e->setNext(opCtx, nxt->ofsFrom(this));
e = nxt;
}
verify(_extraOffset);
}
}
-NamespaceDetails* NamespaceDetails::writingWithoutExtra(OperationContext* txn) {
- return txn->recoveryUnit()->writing(this);
+NamespaceDetails* NamespaceDetails::writingWithoutExtra(OperationContext* opCtx) {
+ return opCtx->recoveryUnit()->writing(this);
}
// XXX - this method should go away
-NamespaceDetails* NamespaceDetails::writingWithExtra(OperationContext* txn) {
+NamespaceDetails* NamespaceDetails::writingWithExtra(OperationContext* opCtx) {
for (Extra* e = extra(); e; e = e->next(this)) {
- txn->recoveryUnit()->writing(e);
+ opCtx->recoveryUnit()->writing(e);
}
- return writingWithoutExtra(txn);
+ return writingWithoutExtra(opCtx);
}
-void NamespaceDetails::setMaxCappedDocs(OperationContext* txn, long long max) {
+void NamespaceDetails::setMaxCappedDocs(OperationContext* opCtx, long long max) {
massert(16499,
"max in a capped collection has to be < 2^31 or -1",
CollectionOptions::validMaxCappedDocs(&max));
@@ -222,21 +222,21 @@ void NamespaceDetails::setMaxCappedDocs(OperationContext* txn, long long max) {
/* ------------------------------------------------------------------------- */
-int NamespaceDetails::_catalogFindIndexByName(OperationContext* txn,
+int NamespaceDetails::_catalogFindIndexByName(OperationContext* opCtx,
const Collection* coll,
StringData name,
bool includeBackgroundInProgress) const {
IndexIterator i = ii(includeBackgroundInProgress);
while (i.more()) {
- const BSONObj obj = coll->docFor(txn, i.next().info.toRecordId()).value();
+ const BSONObj obj = coll->docFor(opCtx, i.next().info.toRecordId()).value();
if (name == obj.getStringField("name"))
return i.pos() - 1;
}
return -1;
}
-void NamespaceDetails::Extra::setNext(OperationContext* txn, long ofs) {
- *txn->recoveryUnit()->writing(&_next) = ofs;
+void NamespaceDetails::Extra::setNext(OperationContext* opCtx, long ofs) {
+ *opCtx->recoveryUnit()->writing(&_next) = ofs;
}
} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
index f1196bcd166..cf82703a25d 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
@@ -153,7 +153,7 @@ public:
return 0;
return (Extra*)(((char*)d) + _next);
}
- void setNext(OperationContext* txn, long ofs);
+ void setNext(OperationContext* opCtx, long ofs);
void copy(NamespaceDetails* d, const Extra& e) {
memcpy(this, &e, sizeof(Extra));
_next = 0;
@@ -165,15 +165,18 @@ public:
return (Extra*)(((char*)this) + _extraOffset);
}
/* add extra space for indexes when more than 10 */
- Extra* allocExtra(OperationContext* txn, StringData ns, NamespaceIndex& ni, int nindexessofar);
+ Extra* allocExtra(OperationContext* opCtx,
+ StringData ns,
+ NamespaceIndex& ni,
+ int nindexessofar);
- void copyingFrom(OperationContext* txn,
+ void copyingFrom(OperationContext* opCtx,
StringData thisns,
NamespaceIndex& ni,
NamespaceDetails* src); // must be called when renaming a NS to fix up extra
public:
- void setMaxCappedDocs(OperationContext* txn, long long max);
+ void setMaxCappedDocs(OperationContext* opCtx, long long max);
enum UserFlags {
Flag_UsePowerOf2Sizes = 1 << 0,
@@ -210,12 +213,12 @@ public:
* This fetches the IndexDetails for the next empty index slot. The caller must populate
* returned object. This handles allocating extra index space, if necessary.
*/
- IndexDetails& getNextIndexDetails(OperationContext* txn, Collection* collection);
+ IndexDetails& getNextIndexDetails(OperationContext* opCtx, Collection* collection);
- NamespaceDetails* writingWithoutExtra(OperationContext* txn);
+ NamespaceDetails* writingWithoutExtra(OperationContext* opCtx);
/** Make all linked Extra objects writeable as well */
- NamespaceDetails* writingWithExtra(OperationContext* txn);
+ NamespaceDetails* writingWithExtra(OperationContext* opCtx);
/**
* Returns the offset of the specified index name within the array of indexes. Must be
@@ -223,7 +226,7 @@ public:
*
* @return > 0 if index name was found, -1 otherwise.
*/
- int _catalogFindIndexByName(OperationContext* txn,
+ int _catalogFindIndexByName(OperationContext* opCtx,
const Collection* coll,
StringData name,
bool includeBackgroundInProgress) const;
@@ -234,7 +237,7 @@ private:
* a and b are 2 index ids, whose contents will be swapped
* must have a lock on the entire collection to do this
*/
- void swapIndex(OperationContext* txn, int a, int b);
+ void swapIndex(OperationContext* opCtx, int a, int b);
friend class IndexCatalog;
friend class IndexCatalogEntry;
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
index 7d92f431db8..ee2031d4a7e 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
@@ -65,8 +65,8 @@ NamespaceDetailsCollectionCatalogEntry::NamespaceDetailsCollectionCatalogEntry(
}
CollectionOptions NamespaceDetailsCollectionCatalogEntry::getCollectionOptions(
- OperationContext* txn) const {
- CollectionOptions options = _db->getCollectionOptions(txn, _namespacesRecordId);
+ OperationContext* opCtx) const {
+ CollectionOptions options = _db->getCollectionOptions(opCtx, _namespacesRecordId);
if (options.flagsSet) {
if (options.flags != _details->userFlags) {
@@ -84,11 +84,11 @@ CollectionOptions NamespaceDetailsCollectionCatalogEntry::getCollectionOptions(
return options;
}
-int NamespaceDetailsCollectionCatalogEntry::getTotalIndexCount(OperationContext* txn) const {
+int NamespaceDetailsCollectionCatalogEntry::getTotalIndexCount(OperationContext* opCtx) const {
return _details->nIndexes + _details->indexBuildsInProgress;
}
-int NamespaceDetailsCollectionCatalogEntry::getCompletedIndexCount(OperationContext* txn) const {
+int NamespaceDetailsCollectionCatalogEntry::getCompletedIndexCount(OperationContext* opCtx) const {
return _details->nIndexes;
}
@@ -96,22 +96,22 @@ int NamespaceDetailsCollectionCatalogEntry::getMaxAllowedIndexes() const {
return NamespaceDetails::NIndexesMax;
}
-void NamespaceDetailsCollectionCatalogEntry::getAllIndexes(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::getAllIndexes(OperationContext* opCtx,
std::vector<std::string>* names) const {
NamespaceDetails::IndexIterator i = _details->ii(true);
while (i.more()) {
const IndexDetails& id = i.next();
- const BSONObj obj = _indexRecordStore->dataFor(txn, id.info.toRecordId()).toBson();
+ const BSONObj obj = _indexRecordStore->dataFor(opCtx, id.info.toRecordId()).toBson();
names->push_back(obj.getStringField("name"));
}
}
-bool NamespaceDetailsCollectionCatalogEntry::isIndexMultikey(OperationContext* txn,
+bool NamespaceDetailsCollectionCatalogEntry::isIndexMultikey(OperationContext* opCtx,
StringData idxName,
MultikeyPaths* multikeyPaths) const {
// TODO SERVER-22727: Populate 'multikeyPaths' with path components that cause 'idxName' to be
// multikey.
- int idxNo = _findIndexNumber(txn, idxName);
+ int idxNo = _findIndexNumber(opCtx, idxName);
invariant(idxNo >= 0);
return isIndexMultikey(idxNo);
}
@@ -121,16 +121,16 @@ bool NamespaceDetailsCollectionCatalogEntry::isIndexMultikey(int idxNo) const {
}
bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(
- OperationContext* txn, StringData indexName, const MultikeyPaths& multikeyPaths) {
+ OperationContext* opCtx, StringData indexName, const MultikeyPaths& multikeyPaths) {
// TODO SERVER-22727: Store new path components from 'multikeyPaths' that cause 'indexName' to
// be multikey.
- int idxNo = _findIndexNumber(txn, indexName);
+ int idxNo = _findIndexNumber(opCtx, indexName);
invariant(idxNo >= 0);
const bool multikey = true;
- return setIndexIsMultikey(txn, idxNo, multikey);
+ return setIndexIsMultikey(opCtx, idxNo, multikey);
}
-bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
+bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(OperationContext* opCtx,
int idxNo,
bool multikey) {
unsigned long long mask = 1ULL << idxNo;
@@ -141,7 +141,7 @@ bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(OperationContext
return false;
}
- *txn->recoveryUnit()->writing(&_details->multiKeyIndexBits) |= mask;
+ *opCtx->recoveryUnit()->writing(&_details->multiKeyIndexBits) |= mask;
} else {
// Shortcut if the bit is already set correctly
if (!(_details->multiKeyIndexBits & mask)) {
@@ -150,49 +150,49 @@ bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(OperationContext
// Invert mask: all 1's except a 0 at the ith bit
mask = ~mask;
- *txn->recoveryUnit()->writing(&_details->multiKeyIndexBits) &= mask;
+ *opCtx->recoveryUnit()->writing(&_details->multiKeyIndexBits) &= mask;
}
return true;
}
-RecordId NamespaceDetailsCollectionCatalogEntry::getIndexHead(OperationContext* txn,
+RecordId NamespaceDetailsCollectionCatalogEntry::getIndexHead(OperationContext* opCtx,
StringData idxName) const {
- int idxNo = _findIndexNumber(txn, idxName);
+ int idxNo = _findIndexNumber(opCtx, idxName);
invariant(idxNo >= 0);
return _details->idx(idxNo).head.toRecordId();
}
-BSONObj NamespaceDetailsCollectionCatalogEntry::getIndexSpec(OperationContext* txn,
+BSONObj NamespaceDetailsCollectionCatalogEntry::getIndexSpec(OperationContext* opCtx,
StringData idxName) const {
- int idxNo = _findIndexNumber(txn, idxName);
+ int idxNo = _findIndexNumber(opCtx, idxName);
invariant(idxNo >= 0);
const IndexDetails& id = _details->idx(idxNo);
- return _indexRecordStore->dataFor(txn, id.info.toRecordId()).toBson();
+ return _indexRecordStore->dataFor(opCtx, id.info.toRecordId()).toBson();
}
-void NamespaceDetailsCollectionCatalogEntry::setIndexHead(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::setIndexHead(OperationContext* opCtx,
StringData idxName,
const RecordId& newHead) {
- int idxNo = _findIndexNumber(txn, idxName);
+ int idxNo = _findIndexNumber(opCtx, idxName);
invariant(idxNo >= 0);
- *txn->recoveryUnit()->writing(&_details->idx(idxNo).head) = DiskLoc::fromRecordId(newHead);
+ *opCtx->recoveryUnit()->writing(&_details->idx(idxNo).head) = DiskLoc::fromRecordId(newHead);
}
-bool NamespaceDetailsCollectionCatalogEntry::isIndexReady(OperationContext* txn,
+bool NamespaceDetailsCollectionCatalogEntry::isIndexReady(OperationContext* opCtx,
StringData idxName) const {
- int idxNo = _findIndexNumber(txn, idxName);
+ int idxNo = _findIndexNumber(opCtx, idxName);
invariant(idxNo >= 0);
- return idxNo < getCompletedIndexCount(txn);
+ return idxNo < getCompletedIndexCount(opCtx);
}
-int NamespaceDetailsCollectionCatalogEntry::_findIndexNumber(OperationContext* txn,
+int NamespaceDetailsCollectionCatalogEntry::_findIndexNumber(OperationContext* opCtx,
StringData idxName) const {
NamespaceDetails::IndexIterator i = _details->ii(true);
while (i.more()) {
const IndexDetails& id = i.next();
int idxNo = i.pos() - 1;
- const BSONObj obj = _indexRecordStore->dataFor(txn, id.info.toRecordId()).toBson();
+ const BSONObj obj = _indexRecordStore->dataFor(opCtx, id.info.toRecordId()).toBson();
if (idxName == obj.getStringField("name"))
return idxNo;
}
@@ -221,29 +221,29 @@ public:
}
} iu_unittest;
-Status NamespaceDetailsCollectionCatalogEntry::removeIndex(OperationContext* txn,
+Status NamespaceDetailsCollectionCatalogEntry::removeIndex(OperationContext* opCtx,
StringData indexName) {
- int idxNo = _findIndexNumber(txn, indexName);
+ int idxNo = _findIndexNumber(opCtx, indexName);
if (idxNo < 0)
return Status(ErrorCodes::NamespaceNotFound, "index not found to remove");
RecordId infoLocation = _details->idx(idxNo).info.toRecordId();
{ // sanity check
- BSONObj info = _indexRecordStore->dataFor(txn, infoLocation).toBson();
+ BSONObj info = _indexRecordStore->dataFor(opCtx, infoLocation).toBson();
invariant(info["name"].String() == indexName);
}
{ // drop the namespace
string indexNamespace = IndexDescriptor::makeIndexNamespace(ns().ns(), indexName);
- Status status = _db->dropCollection(txn, indexNamespace);
+ Status status = _db->dropCollection(opCtx, indexNamespace);
if (!status.isOK()) {
return status;
}
}
{ // all info in the .ns file
- NamespaceDetails* d = _details->writingWithExtra(txn);
+ NamespaceDetails* d = _details->writingWithExtra(opCtx);
// fix the _multiKeyIndexBits, by moving all bits above me down one
d->multiKeyIndexBits = removeAndSlideBit(d->multiKeyIndexBits, idxNo);
@@ -253,100 +253,100 @@ Status NamespaceDetailsCollectionCatalogEntry::removeIndex(OperationContext* txn
else
d->nIndexes--;
- for (int i = idxNo; i < getTotalIndexCount(txn); i++)
+ for (int i = idxNo; i < getTotalIndexCount(opCtx); i++)
d->idx(i) = d->idx(i + 1);
- d->idx(getTotalIndexCount(txn)) = IndexDetails();
+ d->idx(getTotalIndexCount(opCtx)) = IndexDetails();
}
// Someone may be querying the system.indexes namespace directly, so we need to invalidate
// its cursors.
MMAPV1DatabaseCatalogEntry::invalidateSystemCollectionRecord(
- txn, NamespaceString(_db->name(), "system.indexes"), infoLocation);
+ opCtx, NamespaceString(_db->name(), "system.indexes"), infoLocation);
// remove from system.indexes
- _indexRecordStore->deleteRecord(txn, infoLocation);
+ _indexRecordStore->deleteRecord(opCtx, infoLocation);
return Status::OK();
}
-Status NamespaceDetailsCollectionCatalogEntry::prepareForIndexBuild(OperationContext* txn,
+Status NamespaceDetailsCollectionCatalogEntry::prepareForIndexBuild(OperationContext* opCtx,
const IndexDescriptor* desc) {
BSONObj spec = desc->infoObj();
// 1) entry in system.indexs
StatusWith<RecordId> systemIndexesEntry =
- _indexRecordStore->insertRecord(txn, spec.objdata(), spec.objsize(), false);
+ _indexRecordStore->insertRecord(opCtx, spec.objdata(), spec.objsize(), false);
if (!systemIndexesEntry.isOK())
return systemIndexesEntry.getStatus();
// 2) NamespaceDetails mods
IndexDetails* id;
try {
- id = &_details->idx(getTotalIndexCount(txn), true);
+ id = &_details->idx(getTotalIndexCount(opCtx), true);
} catch (DBException&) {
- _details->allocExtra(txn, ns().ns(), _db->_namespaceIndex, getTotalIndexCount(txn));
- id = &_details->idx(getTotalIndexCount(txn), false);
+ _details->allocExtra(opCtx, ns().ns(), _db->_namespaceIndex, getTotalIndexCount(opCtx));
+ id = &_details->idx(getTotalIndexCount(opCtx), false);
}
const DiskLoc infoLoc = DiskLoc::fromRecordId(systemIndexesEntry.getValue());
- *txn->recoveryUnit()->writing(&id->info) = infoLoc;
- *txn->recoveryUnit()->writing(&id->head) = DiskLoc();
+ *opCtx->recoveryUnit()->writing(&id->info) = infoLoc;
+ *opCtx->recoveryUnit()->writing(&id->head) = DiskLoc();
- txn->recoveryUnit()->writingInt(_details->indexBuildsInProgress) += 1;
+ opCtx->recoveryUnit()->writingInt(_details->indexBuildsInProgress) += 1;
// 3) indexes entry in .ns file and system.namespaces
- _db->createNamespaceForIndex(txn, desc->indexNamespace());
+ _db->createNamespaceForIndex(opCtx, desc->indexNamespace());
// TODO SERVER-22727: Create an entry for path-level multikey info when creating the new index.
// Mark the collation feature as in use if the index has a non-simple collation.
if (spec["collation"]) {
- _db->markCollationFeatureAsInUse(txn);
+ _db->markCollationFeatureAsInUse(opCtx);
}
return Status::OK();
}
-void NamespaceDetailsCollectionCatalogEntry::indexBuildSuccess(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::indexBuildSuccess(OperationContext* opCtx,
StringData indexName) {
- int idxNo = _findIndexNumber(txn, indexName);
+ int idxNo = _findIndexNumber(opCtx, indexName);
fassert(17202, idxNo >= 0);
// Make sure the newly created index is relocated to nIndexes, if it isn't already there
- if (idxNo != getCompletedIndexCount(txn)) {
- int toIdxNo = getCompletedIndexCount(txn);
+ if (idxNo != getCompletedIndexCount(opCtx)) {
+ int toIdxNo = getCompletedIndexCount(opCtx);
- //_details->swapIndex( txn, idxNo, toIdxNo );
+ //_details->swapIndex( opCtx, idxNo, toIdxNo );
// flip main meta data
IndexDetails temp = _details->idx(idxNo);
- *txn->recoveryUnit()->writing(&_details->idx(idxNo)) = _details->idx(toIdxNo);
- *txn->recoveryUnit()->writing(&_details->idx(toIdxNo)) = temp;
+ *opCtx->recoveryUnit()->writing(&_details->idx(idxNo)) = _details->idx(toIdxNo);
+ *opCtx->recoveryUnit()->writing(&_details->idx(toIdxNo)) = temp;
// flip multi key bits
bool tempMultikey = isIndexMultikey(idxNo);
- setIndexIsMultikey(txn, idxNo, isIndexMultikey(toIdxNo));
- setIndexIsMultikey(txn, toIdxNo, tempMultikey);
+ setIndexIsMultikey(opCtx, idxNo, isIndexMultikey(toIdxNo));
+ setIndexIsMultikey(opCtx, toIdxNo, tempMultikey);
idxNo = toIdxNo;
- invariant((idxNo = _findIndexNumber(txn, indexName)));
+ invariant((idxNo = _findIndexNumber(opCtx, indexName)));
}
- txn->recoveryUnit()->writingInt(_details->indexBuildsInProgress) -= 1;
- txn->recoveryUnit()->writingInt(_details->nIndexes) += 1;
+ opCtx->recoveryUnit()->writingInt(_details->indexBuildsInProgress) -= 1;
+ opCtx->recoveryUnit()->writingInt(_details->nIndexes) += 1;
- invariant(isIndexReady(txn, indexName));
+ invariant(isIndexReady(opCtx, indexName));
}
-void NamespaceDetailsCollectionCatalogEntry::updateTTLSetting(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::updateTTLSetting(OperationContext* opCtx,
StringData idxName,
long long newExpireSeconds) {
- int idx = _findIndexNumber(txn, idxName);
+ int idx = _findIndexNumber(opCtx, idxName);
invariant(idx >= 0);
IndexDetails& indexDetails = _details->idx(idx);
- BSONObj obj = _indexRecordStore->dataFor(txn, indexDetails.info.toRecordId()).toBson();
+ BSONObj obj = _indexRecordStore->dataFor(opCtx, indexDetails.info.toRecordId()).toBson();
const BSONElement oldExpireSecs = obj.getField("expireAfterSeconds");
// Important that we set the new value in-place. We are writing directly to the
@@ -358,14 +358,14 @@ void NamespaceDetailsCollectionCatalogEntry::updateTTLSetting(OperationContext*
massert(16631, "index does not have an 'expireAfterSeconds' field", false);
break;
case NumberInt:
- *txn->recoveryUnit()->writing(reinterpret_cast<int*>(nonConstPtr)) = newExpireSeconds;
+ *opCtx->recoveryUnit()->writing(reinterpret_cast<int*>(nonConstPtr)) = newExpireSeconds;
break;
case NumberDouble:
- *txn->recoveryUnit()->writing(reinterpret_cast<double*>(nonConstPtr)) =
+ *opCtx->recoveryUnit()->writing(reinterpret_cast<double*>(nonConstPtr)) =
newExpireSeconds;
break;
case NumberLong:
- *txn->recoveryUnit()->writing(reinterpret_cast<long long*>(nonConstPtr)) =
+ *opCtx->recoveryUnit()->writing(reinterpret_cast<long long*>(nonConstPtr)) =
newExpireSeconds;
break;
default:
@@ -373,65 +373,66 @@ void NamespaceDetailsCollectionCatalogEntry::updateTTLSetting(OperationContext*
}
}
-void NamespaceDetailsCollectionCatalogEntry::_updateSystemNamespaces(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::_updateSystemNamespaces(OperationContext* opCtx,
const BSONObj& update) {
if (!_namespacesRecordStore)
return;
- RecordData entry = _namespacesRecordStore->dataFor(txn, _namespacesRecordId);
+ RecordData entry = _namespacesRecordStore->dataFor(opCtx, _namespacesRecordId);
const BSONObj newEntry = applyUpdateOperators(entry.releaseToBson(), update);
Status result = _namespacesRecordStore->updateRecord(
- txn, _namespacesRecordId, newEntry.objdata(), newEntry.objsize(), false, NULL);
+ opCtx, _namespacesRecordId, newEntry.objdata(), newEntry.objsize(), false, NULL);
if (ErrorCodes::NeedsDocumentMove == result) {
StatusWith<RecordId> newLocation = _namespacesRecordStore->insertRecord(
- txn, newEntry.objdata(), newEntry.objsize(), false);
+ opCtx, newEntry.objdata(), newEntry.objsize(), false);
fassert(40074, newLocation.getStatus().isOK());
// Invalidate old namespace record
MMAPV1DatabaseCatalogEntry::invalidateSystemCollectionRecord(
- txn, NamespaceString(_db->name(), "system.namespaces"), _namespacesRecordId);
+ opCtx, NamespaceString(_db->name(), "system.namespaces"), _namespacesRecordId);
- _namespacesRecordStore->deleteRecord(txn, _namespacesRecordId);
+ _namespacesRecordStore->deleteRecord(opCtx, _namespacesRecordId);
- setNamespacesRecordId(txn, newLocation.getValue());
+ setNamespacesRecordId(opCtx, newLocation.getValue());
} else {
fassert(17486, result.isOK());
}
}
-void NamespaceDetailsCollectionCatalogEntry::updateFlags(OperationContext* txn, int newValue) {
+void NamespaceDetailsCollectionCatalogEntry::updateFlags(OperationContext* opCtx, int newValue) {
NamespaceDetailsRSV1MetaData md(ns().ns(), _details);
- md.replaceUserFlags(txn, newValue);
- _updateSystemNamespaces(txn, BSON("$set" << BSON("options.flags" << newValue)));
+ md.replaceUserFlags(opCtx, newValue);
+ _updateSystemNamespaces(opCtx, BSON("$set" << BSON("options.flags" << newValue)));
}
-void NamespaceDetailsCollectionCatalogEntry::updateValidator(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::updateValidator(OperationContext* opCtx,
const BSONObj& validator,
StringData validationLevel,
StringData validationAction) {
_updateSystemNamespaces(
- txn,
+ opCtx,
BSON("$set" << BSON("options.validator" << validator << "options.validationLevel"
<< validationLevel
<< "options.validationAction"
<< validationAction)));
}
-void NamespaceDetailsCollectionCatalogEntry::setNamespacesRecordId(OperationContext* txn,
+void NamespaceDetailsCollectionCatalogEntry::setNamespacesRecordId(OperationContext* opCtx,
RecordId newId) {
if (newId.isNull()) {
invariant(ns().coll() == "system.namespaces" || ns().coll() == "system.indexes");
} else {
- // 'txn' is allowed to be null, but we don't need an OperationContext in MMAP, so that's OK.
- auto namespaceEntry = _namespacesRecordStore->dataFor(txn, newId).releaseToBson();
+ // 'opCtx' is allowed to be null, but we don't need an OperationContext in MMAP, so that's
+ // OK.
+ auto namespaceEntry = _namespacesRecordStore->dataFor(opCtx, newId).releaseToBson();
invariant(namespaceEntry["name"].String() == ns().ns());
// Register RecordId change for rollback if we're not initializing.
- if (txn && !_namespacesRecordId.isNull()) {
+ if (opCtx && !_namespacesRecordId.isNull()) {
auto oldNamespacesRecordId = _namespacesRecordId;
- txn->recoveryUnit()->onRollback([=] { _namespacesRecordId = oldNamespacesRecordId; });
+ opCtx->recoveryUnit()->onRollback([=] { _namespacesRecordId = oldNamespacesRecordId; });
}
_namespacesRecordId = newId;
}
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
index d1fc4e948c1..06b370bfa1f 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
@@ -55,63 +55,63 @@ public:
~NamespaceDetailsCollectionCatalogEntry() {}
- CollectionOptions getCollectionOptions(OperationContext* txn) const final;
+ CollectionOptions getCollectionOptions(OperationContext* opCtx) const final;
- int getTotalIndexCount(OperationContext* txn) const final;
+ int getTotalIndexCount(OperationContext* opCtx) const final;
- int getCompletedIndexCount(OperationContext* txn) const final;
+ int getCompletedIndexCount(OperationContext* opCtx) const final;
int getMaxAllowedIndexes() const final;
- void getAllIndexes(OperationContext* txn, std::vector<std::string>* names) const final;
+ void getAllIndexes(OperationContext* opCtx, std::vector<std::string>* names) const final;
- BSONObj getIndexSpec(OperationContext* txn, StringData idxName) const final;
+ BSONObj getIndexSpec(OperationContext* opCtx, StringData idxName) const final;
- bool isIndexMultikey(OperationContext* txn,
+ bool isIndexMultikey(OperationContext* opCtx,
StringData indexName,
MultikeyPaths* multikeyPaths) const final;
bool isIndexMultikey(int idxNo) const;
- bool setIndexIsMultikey(OperationContext* txn, int idxNo, bool multikey = true);
- bool setIndexIsMultikey(OperationContext* txn,
+ bool setIndexIsMultikey(OperationContext* opCtx, int idxNo, bool multikey = true);
+ bool setIndexIsMultikey(OperationContext* opCtx,
StringData indexName,
const MultikeyPaths& multikeyPaths) final;
- RecordId getIndexHead(OperationContext* txn, StringData indexName) const final;
+ RecordId getIndexHead(OperationContext* opCtx, StringData indexName) const final;
- void setIndexHead(OperationContext* txn, StringData indexName, const RecordId& newHead) final;
+ void setIndexHead(OperationContext* opCtx, StringData indexName, const RecordId& newHead) final;
- bool isIndexReady(OperationContext* txn, StringData indexName) const final;
+ bool isIndexReady(OperationContext* opCtx, StringData indexName) const final;
- Status removeIndex(OperationContext* txn, StringData indexName) final;
+ Status removeIndex(OperationContext* opCtx, StringData indexName) final;
- Status prepareForIndexBuild(OperationContext* txn, const IndexDescriptor* spec) final;
+ Status prepareForIndexBuild(OperationContext* opCtx, const IndexDescriptor* spec) final;
- void indexBuildSuccess(OperationContext* txn, StringData indexName) final;
+ void indexBuildSuccess(OperationContext* opCtx, StringData indexName) final;
- void updateTTLSetting(OperationContext* txn,
+ void updateTTLSetting(OperationContext* opCtx,
StringData idxName,
long long newExpireSeconds) final;
- void updateFlags(OperationContext* txn, int newValue) final;
+ void updateFlags(OperationContext* opCtx, int newValue) final;
- void updateValidator(OperationContext* txn,
+ void updateValidator(OperationContext* opCtx,
const BSONObj& validator,
StringData validationLevel,
StringData validationAction) final;
// not part of interface, but available to my storage engine
- int _findIndexNumber(OperationContext* txn, StringData indexName) const;
+ int _findIndexNumber(OperationContext* opCtx, StringData indexName) const;
RecordId getNamespacesRecordId() {
return _namespacesRecordId;
}
/**
- * 'txn' is only allowed to be null when called from the constructor.
+ * 'opCtx' is only allowed to be null when called from the constructor.
*/
- void setNamespacesRecordId(OperationContext* txn, RecordId newId);
+ void setNamespacesRecordId(OperationContext* opCtx, RecordId newId);
private:
NamespaceDetails* _details;
@@ -127,7 +127,7 @@ private:
* Updates the entry for this namespace in '_namespacesRecordStore', updating
* '_namespacesRecordId' if necessary.
*/
- void _updateSystemNamespaces(OperationContext* txn, const BSONObj& update);
+ void _updateSystemNamespaces(OperationContext* opCtx, const BSONObj& update);
friend class MMAPV1DatabaseCatalogEntry;
};
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
index eaa3a1cf958..7d5f1805d68 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
@@ -48,16 +48,17 @@ const DiskLoc& NamespaceDetailsRSV1MetaData::capExtent() const {
return _details->capExtent;
}
-void NamespaceDetailsRSV1MetaData::setCapExtent(OperationContext* txn, const DiskLoc& loc) {
- *txn->recoveryUnit()->writing(&_details->capExtent) = loc;
+void NamespaceDetailsRSV1MetaData::setCapExtent(OperationContext* opCtx, const DiskLoc& loc) {
+ *opCtx->recoveryUnit()->writing(&_details->capExtent) = loc;
}
const DiskLoc& NamespaceDetailsRSV1MetaData::capFirstNewRecord() const {
return _details->capFirstNewRecord;
}
-void NamespaceDetailsRSV1MetaData::setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc) {
- *txn->recoveryUnit()->writing(&_details->capFirstNewRecord) = loc;
+void NamespaceDetailsRSV1MetaData::setCapFirstNewRecord(OperationContext* opCtx,
+ const DiskLoc& loc) {
+ *opCtx->recoveryUnit()->writing(&_details->capFirstNewRecord) = loc;
}
bool NamespaceDetailsRSV1MetaData::capLooped() const {
@@ -71,19 +72,19 @@ long long NamespaceDetailsRSV1MetaData::numRecords() const {
return _details->stats.nrecords;
}
-void NamespaceDetailsRSV1MetaData::incrementStats(OperationContext* txn,
+void NamespaceDetailsRSV1MetaData::incrementStats(OperationContext* opCtx,
long long dataSizeIncrement,
long long numRecordsIncrement) {
// durability todo : this could be a bit annoying / slow to record constantly
- NamespaceDetails::Stats* s = txn->recoveryUnit()->writing(&_details->stats);
+ NamespaceDetails::Stats* s = opCtx->recoveryUnit()->writing(&_details->stats);
s->datasize += dataSizeIncrement;
s->nrecords += numRecordsIncrement;
}
-void NamespaceDetailsRSV1MetaData::setStats(OperationContext* txn,
+void NamespaceDetailsRSV1MetaData::setStats(OperationContext* opCtx,
long long dataSize,
long long numRecords) {
- NamespaceDetails::Stats* s = txn->recoveryUnit()->writing(&_details->stats);
+ NamespaceDetails::Stats* s = opCtx->recoveryUnit()->writing(&_details->stats);
s->datasize = dataSize;
s->nrecords = numRecords;
}
@@ -103,45 +104,45 @@ DiskLoc NamespaceDetailsRSV1MetaData::deletedListEntry(int bucket) const {
return head;
}
-void NamespaceDetailsRSV1MetaData::setDeletedListEntry(OperationContext* txn,
+void NamespaceDetailsRSV1MetaData::setDeletedListEntry(OperationContext* opCtx,
int bucket,
const DiskLoc& loc) {
DiskLoc* head = (bucket < NamespaceDetails::SmallBuckets)
? &_details->deletedListSmall[bucket]
: &_details->deletedListLarge[bucket - NamespaceDetails::SmallBuckets];
- *txn->recoveryUnit()->writing(head) = loc;
+ *opCtx->recoveryUnit()->writing(head) = loc;
}
DiskLoc NamespaceDetailsRSV1MetaData::deletedListLegacyGrabBag() const {
return _details->deletedListLegacyGrabBag;
}
-void NamespaceDetailsRSV1MetaData::setDeletedListLegacyGrabBag(OperationContext* txn,
+void NamespaceDetailsRSV1MetaData::setDeletedListLegacyGrabBag(OperationContext* opCtx,
const DiskLoc& loc) {
- *txn->recoveryUnit()->writing(&_details->deletedListLegacyGrabBag) = loc;
+ *opCtx->recoveryUnit()->writing(&_details->deletedListLegacyGrabBag) = loc;
}
-void NamespaceDetailsRSV1MetaData::orphanDeletedList(OperationContext* txn) {
+void NamespaceDetailsRSV1MetaData::orphanDeletedList(OperationContext* opCtx) {
for (int i = 0; i < RecordStoreV1Base::Buckets; i++) {
- setDeletedListEntry(txn, i, DiskLoc());
+ setDeletedListEntry(opCtx, i, DiskLoc());
}
- setDeletedListLegacyGrabBag(txn, DiskLoc());
+ setDeletedListLegacyGrabBag(opCtx, DiskLoc());
}
-const DiskLoc& NamespaceDetailsRSV1MetaData::firstExtent(OperationContext* txn) const {
+const DiskLoc& NamespaceDetailsRSV1MetaData::firstExtent(OperationContext* opCtx) const {
return _details->firstExtent;
}
-void NamespaceDetailsRSV1MetaData::setFirstExtent(OperationContext* txn, const DiskLoc& loc) {
- *txn->recoveryUnit()->writing(&_details->firstExtent) = loc;
+void NamespaceDetailsRSV1MetaData::setFirstExtent(OperationContext* opCtx, const DiskLoc& loc) {
+ *opCtx->recoveryUnit()->writing(&_details->firstExtent) = loc;
}
-const DiskLoc& NamespaceDetailsRSV1MetaData::lastExtent(OperationContext* txn) const {
+const DiskLoc& NamespaceDetailsRSV1MetaData::lastExtent(OperationContext* opCtx) const {
return _details->lastExtent;
}
-void NamespaceDetailsRSV1MetaData::setLastExtent(OperationContext* txn, const DiskLoc& loc) {
- *txn->recoveryUnit()->writing(&_details->lastExtent) = loc;
+void NamespaceDetailsRSV1MetaData::setLastExtent(OperationContext* opCtx, const DiskLoc& loc) {
+ *opCtx->recoveryUnit()->writing(&_details->lastExtent) = loc;
}
bool NamespaceDetailsRSV1MetaData::isCapped() const {
@@ -156,38 +157,38 @@ int NamespaceDetailsRSV1MetaData::userFlags() const {
return _details->userFlags;
}
-bool NamespaceDetailsRSV1MetaData::setUserFlag(OperationContext* txn, int flag) {
+bool NamespaceDetailsRSV1MetaData::setUserFlag(OperationContext* opCtx, int flag) {
if ((_details->userFlags & flag) == flag)
return false;
- txn->recoveryUnit()->writingInt(_details->userFlags) |= flag;
+ opCtx->recoveryUnit()->writingInt(_details->userFlags) |= flag;
return true;
}
-bool NamespaceDetailsRSV1MetaData::clearUserFlag(OperationContext* txn, int flag) {
+bool NamespaceDetailsRSV1MetaData::clearUserFlag(OperationContext* opCtx, int flag) {
if ((_details->userFlags & flag) == 0)
return false;
- txn->recoveryUnit()->writingInt(_details->userFlags) &= ~flag;
+ opCtx->recoveryUnit()->writingInt(_details->userFlags) &= ~flag;
return true;
}
-bool NamespaceDetailsRSV1MetaData::replaceUserFlags(OperationContext* txn, int flags) {
+bool NamespaceDetailsRSV1MetaData::replaceUserFlags(OperationContext* opCtx, int flags) {
if (_details->userFlags == flags)
return false;
- txn->recoveryUnit()->writingInt(_details->userFlags) = flags;
+ opCtx->recoveryUnit()->writingInt(_details->userFlags) = flags;
return true;
}
-int NamespaceDetailsRSV1MetaData::lastExtentSize(OperationContext* txn) const {
+int NamespaceDetailsRSV1MetaData::lastExtentSize(OperationContext* opCtx) const {
return _details->lastExtentSize;
}
-void NamespaceDetailsRSV1MetaData::setLastExtentSize(OperationContext* txn, int newMax) {
+void NamespaceDetailsRSV1MetaData::setLastExtentSize(OperationContext* opCtx, int newMax) {
if (_details->lastExtentSize == newMax)
return;
- txn->recoveryUnit()->writingInt(_details->lastExtentSize) = newMax;
+ opCtx->recoveryUnit()->writingInt(_details->lastExtentSize) = newMax;
}
long long NamespaceDetailsRSV1MetaData::maxCappedDocs() const {
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
index a6fde4807b5..26f0a16803f 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
@@ -51,46 +51,46 @@ public:
virtual ~NamespaceDetailsRSV1MetaData() {}
virtual const DiskLoc& capExtent() const;
- virtual void setCapExtent(OperationContext* txn, const DiskLoc& loc);
+ virtual void setCapExtent(OperationContext* opCtx, const DiskLoc& loc);
virtual const DiskLoc& capFirstNewRecord() const;
- virtual void setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc);
+ virtual void setCapFirstNewRecord(OperationContext* opCtx, const DiskLoc& loc);
virtual bool capLooped() const;
virtual long long dataSize() const;
virtual long long numRecords() const;
- virtual void incrementStats(OperationContext* txn,
+ virtual void incrementStats(OperationContext* opCtx,
long long dataSizeIncrement,
long long numRecordsIncrement);
- virtual void setStats(OperationContext* txn, long long dataSize, long long numRecords);
+ virtual void setStats(OperationContext* opCtx, long long dataSize, long long numRecords);
virtual DiskLoc deletedListEntry(int bucket) const;
- virtual void setDeletedListEntry(OperationContext* txn, int bucket, const DiskLoc& loc);
+ virtual void setDeletedListEntry(OperationContext* opCtx, int bucket, const DiskLoc& loc);
virtual DiskLoc deletedListLegacyGrabBag() const;
- virtual void setDeletedListLegacyGrabBag(OperationContext* txn, const DiskLoc& loc);
+ virtual void setDeletedListLegacyGrabBag(OperationContext* opCtx, const DiskLoc& loc);
- virtual void orphanDeletedList(OperationContext* txn);
+ virtual void orphanDeletedList(OperationContext* opCtx);
- virtual const DiskLoc& firstExtent(OperationContext* txn) const;
- virtual void setFirstExtent(OperationContext* txn, const DiskLoc& loc);
+ virtual const DiskLoc& firstExtent(OperationContext* opCtx) const;
+ virtual void setFirstExtent(OperationContext* opCtx, const DiskLoc& loc);
- virtual const DiskLoc& lastExtent(OperationContext* txn) const;
- virtual void setLastExtent(OperationContext* txn, const DiskLoc& loc);
+ virtual const DiskLoc& lastExtent(OperationContext* opCtx) const;
+ virtual void setLastExtent(OperationContext* opCtx, const DiskLoc& loc);
virtual bool isCapped() const;
virtual bool isUserFlagSet(int flag) const;
virtual int userFlags() const;
- virtual bool setUserFlag(OperationContext* txn, int flag);
- virtual bool clearUserFlag(OperationContext* txn, int flag);
- virtual bool replaceUserFlags(OperationContext* txn, int flags);
+ virtual bool setUserFlag(OperationContext* opCtx, int flag);
+ virtual bool clearUserFlag(OperationContext* opCtx, int flag);
+ virtual bool replaceUserFlags(OperationContext* opCtx, int flags);
- virtual int lastExtentSize(OperationContext* txn) const;
- virtual void setLastExtentSize(OperationContext* txn, int newMax);
+ virtual int lastExtentSize(OperationContext* opCtx) const;
+ virtual void setLastExtentSize(OperationContext* opCtx, int newMax);
virtual long long maxCappedDocs() const;
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
index 173c2afceca..fe9704e0e2d 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
@@ -51,10 +51,10 @@ using std::endl;
using std::list;
using std::string;
-NamespaceIndex::NamespaceIndex(OperationContext* txn,
+NamespaceIndex::NamespaceIndex(OperationContext* opCtx,
const std::string& dir,
const std::string& database)
- : _dir(dir), _database(database), _f(txn, MongoFile::Options::SEQUENTIAL), _ht(nullptr) {}
+ : _dir(dir), _database(database), _f(opCtx, MongoFile::Options::SEQUENTIAL), _ht(nullptr) {}
NamespaceIndex::~NamespaceIndex() {}
@@ -67,33 +67,38 @@ NamespaceDetails* NamespaceIndex::details(const Namespace& ns) const {
return _ht->get(ns);
}
-void NamespaceIndex::add_ns(OperationContext* txn, StringData ns, const DiskLoc& loc, bool capped) {
+void NamespaceIndex::add_ns(OperationContext* opCtx,
+ StringData ns,
+ const DiskLoc& loc,
+ bool capped) {
NamespaceDetails details(loc, capped);
- add_ns(txn, ns, &details);
+ add_ns(opCtx, ns, &details);
}
-void NamespaceIndex::add_ns(OperationContext* txn, StringData ns, const NamespaceDetails* details) {
+void NamespaceIndex::add_ns(OperationContext* opCtx,
+ StringData ns,
+ const NamespaceDetails* details) {
Namespace n(ns);
- add_ns(txn, n, details);
+ add_ns(opCtx, n, details);
}
-void NamespaceIndex::add_ns(OperationContext* txn,
+void NamespaceIndex::add_ns(OperationContext* opCtx,
const Namespace& ns,
const NamespaceDetails* details) {
const NamespaceString nss(ns.toString());
- invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X));
massert(17315, "no . in ns", nsIsFull(nss.toString()));
- uassert(10081, "too many namespaces/collections", _ht->put(txn, ns, *details));
+ uassert(10081, "too many namespaces/collections", _ht->put(opCtx, ns, *details));
}
-void NamespaceIndex::kill_ns(OperationContext* txn, StringData ns) {
+void NamespaceIndex::kill_ns(OperationContext* opCtx, StringData ns) {
const NamespaceString nss(ns.toString());
- invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_X));
const Namespace n(ns);
- _ht->kill(txn, n);
+ _ht->kill(opCtx, n);
if (ns.size() <= Namespace::MaxNsColletionLen) {
// Larger namespace names don't have room for $extras so they can't exist. The code
@@ -102,7 +107,7 @@ void NamespaceIndex::kill_ns(OperationContext* txn, StringData ns) {
for (int i = 0; i <= 1; i++) {
try {
Namespace extra(n.extraName(i));
- _ht->kill(txn, extra);
+ _ht->kill(opCtx, extra);
} catch (DBException&) {
LOG(3) << "caught exception in kill_ns" << endl;
}
@@ -147,7 +152,7 @@ void NamespaceIndex::maybeMkdir() const {
"create dir for db ");
}
-void NamespaceIndex::init(OperationContext* txn) {
+void NamespaceIndex::init(OperationContext* opCtx) {
invariant(!_ht.get());
unsigned long long len = 0;
@@ -158,7 +163,7 @@ void NamespaceIndex::init(OperationContext* txn) {
void* p = 0;
if (boost::filesystem::exists(nsPath)) {
- if (_f.open(txn, pathString)) {
+ if (_f.open(opCtx, pathString)) {
len = _f.length();
if (len % (1024 * 1024) != 0) {
@@ -217,7 +222,7 @@ void NamespaceIndex::init(OperationContext* txn) {
massert(18826, str::stream() << "failure writing file " << pathString, !file.bad());
}
- if (_f.create(txn, pathString, l)) {
+ if (_f.create(opCtx, pathString, l)) {
// The writes done in this function must not be rolled back. This will leave the
// file empty, but available for future use. That is why we go directly to the
// global dur dirty list rather than going through the OperationContext.
@@ -226,7 +231,7 @@ void NamespaceIndex::init(OperationContext* txn) {
// Commit the journal and all changes to disk so that even if exceptions occur
// during subsequent initialization, we won't have uncommited changes during file
// close.
- getDur().commitNow(txn);
+ getDur().commitNow(opCtx);
len = l;
invariant(len == mmapv1GlobalOptions.lenForNewNsFiles);
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h
index 51aae08ea61..5b7766b4035 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h
@@ -51,30 +51,30 @@ class NamespaceIndex {
MONGO_DISALLOW_COPYING(NamespaceIndex);
public:
- NamespaceIndex(OperationContext* txn, const std::string& dir, const std::string& database);
+ NamespaceIndex(OperationContext* opCtx, const std::string& dir, const std::string& database);
~NamespaceIndex();
/**
* Must be called before destruction.
*/
- void close(OperationContext* txn) {
- LockMongoFilesExclusive lock(txn);
- _f.close(txn);
+ void close(OperationContext* opCtx) {
+ LockMongoFilesExclusive lock(opCtx);
+ _f.close(opCtx);
}
/* returns true if the file represented by this file exists on disk */
bool pathExists() const;
- void init(OperationContext* txn);
+ void init(OperationContext* opCtx);
- void add_ns(OperationContext* txn, StringData ns, const DiskLoc& loc, bool capped);
- void add_ns(OperationContext* txn, StringData ns, const NamespaceDetails* details);
- void add_ns(OperationContext* txn, const Namespace& ns, const NamespaceDetails* details);
+ void add_ns(OperationContext* opCtx, StringData ns, const DiskLoc& loc, bool capped);
+ void add_ns(OperationContext* opCtx, StringData ns, const NamespaceDetails* details);
+ void add_ns(OperationContext* opCtx, const Namespace& ns, const NamespaceDetails* details);
NamespaceDetails* details(StringData ns) const;
NamespaceDetails* details(const Namespace& ns) const;
- void kill_ns(OperationContext* txn, StringData ns);
+ void kill_ns(OperationContext* opCtx, StringData ns);
bool allocated() const {
return _ht.get() != 0;
diff --git a/src/mongo/db/storage/mmap_v1/data_file.cpp b/src/mongo/db/storage/mmap_v1/data_file.cpp
index d81aa591817..46af46c0a47 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file.cpp
@@ -104,14 +104,14 @@ int DataFile::_defaultSize() const {
}
/** @return true if found and opened. if uninitialized (prealloc only) does not open. */
-Status DataFile::openExisting(OperationContext* txn, const char* filename) {
+Status DataFile::openExisting(OperationContext* opCtx, const char* filename) {
invariant(_mb == 0);
if (!boost::filesystem::exists(filename)) {
return Status(ErrorCodes::InvalidPath, "DataFile::openExisting - file does not exist");
}
- if (!mmf.open(txn, filename)) {
+ if (!mmf.open(opCtx, filename)) {
return Status(ErrorCodes::InternalError, "DataFile::openExisting - mmf.open failed");
}
@@ -138,7 +138,7 @@ Status DataFile::openExisting(OperationContext* txn, const char* filename) {
return Status::OK();
}
-void DataFile::open(OperationContext* txn,
+void DataFile::open(OperationContext* opCtx,
const char* filename,
int minSize,
bool preallocateOnly) {
@@ -170,7 +170,7 @@ void DataFile::open(OperationContext* txn,
{
invariant(_mb == 0);
unsigned long long sz = size;
- if (mmf.create(txn, filename, sz)) {
+ if (mmf.create(opCtx, filename, sz)) {
_mb = mmf.getView();
}
@@ -179,14 +179,14 @@ void DataFile::open(OperationContext* txn,
}
data_file_check(_mb);
- header()->init(txn, _fileNo, size, filename);
+ header()->init(opCtx, _fileNo, size, filename);
}
void DataFile::flush(bool sync) {
mmf.flush(sync);
}
-DiskLoc DataFile::allocExtentArea(OperationContext* txn, int size) {
+DiskLoc DataFile::allocExtentArea(OperationContext* opCtx, int size) {
// The header would be NULL if file open failed. However, if file open failed we should
// never be entering here.
invariant(header());
@@ -195,15 +195,18 @@ DiskLoc DataFile::allocExtentArea(OperationContext* txn, int size) {
int offset = header()->unused.getOfs();
DataFileHeader* h = header();
- *txn->recoveryUnit()->writing(&h->unused) = DiskLoc(_fileNo, offset + size);
- txn->recoveryUnit()->writingInt(h->unusedLength) = h->unusedLength - size;
+ *opCtx->recoveryUnit()->writing(&h->unused) = DiskLoc(_fileNo, offset + size);
+ opCtx->recoveryUnit()->writingInt(h->unusedLength) = h->unusedLength - size;
return DiskLoc(_fileNo, offset);
}
// -------------------------------------------------------------------------------
-void DataFileHeader::init(OperationContext* txn, int fileno, int filelength, const char* filename) {
+void DataFileHeader::init(OperationContext* opCtx,
+ int fileno,
+ int filelength,
+ const char* filename) {
if (uninitialized()) {
DEV log() << "datafileheader::init initializing " << filename << " n:" << fileno << endl;
@@ -233,17 +236,17 @@ void DataFileHeader::init(OperationContext* txn, int fileno, int filelength, con
freeListStart.Null();
freeListEnd.Null();
} else {
- checkUpgrade(txn);
+ checkUpgrade(opCtx);
}
}
-void DataFileHeader::checkUpgrade(OperationContext* txn) {
+void DataFileHeader::checkUpgrade(OperationContext* opCtx) {
if (freeListStart == DiskLoc(0, 0)) {
// we are upgrading from 2.4 to 2.6
invariant(freeListEnd == DiskLoc(0, 0)); // both start and end should be (0,0) or real
- WriteUnitOfWork wunit(txn);
- *txn->recoveryUnit()->writing(&freeListStart) = DiskLoc();
- *txn->recoveryUnit()->writing(&freeListEnd) = DiskLoc();
+ WriteUnitOfWork wunit(opCtx);
+ *opCtx->recoveryUnit()->writing(&freeListStart) = DiskLoc();
+ *opCtx->recoveryUnit()->writing(&freeListEnd) = DiskLoc();
wunit.commit();
}
}
diff --git a/src/mongo/db/storage/mmap_v1/data_file.h b/src/mongo/db/storage/mmap_v1/data_file.h
index 57b5fb223f9..60dc095791e 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.h
+++ b/src/mongo/db/storage/mmap_v1/data_file.h
@@ -182,9 +182,9 @@ public:
return version.majorRaw() == 0;
}
- void init(OperationContext* txn, int fileno, int filelength, const char* filename);
+ void init(OperationContext* opCtx, int fileno, int filelength, const char* filename);
- void checkUpgrade(OperationContext* txn);
+ void checkUpgrade(OperationContext* opCtx);
bool isEmpty() const {
return uninitialized() || (unusedLength == fileLength - HeaderSize - 16);
@@ -195,13 +195,13 @@ public:
class DataFile {
public:
- DataFile(OperationContext* txn, int fn) : _fileNo(fn), mmf(txn), _mb(NULL) {}
+ DataFile(OperationContext* opCtx, int fn) : _fileNo(fn), mmf(opCtx), _mb(NULL) {}
/** @return true if found and opened. if uninitialized (prealloc only) does not open. */
- Status openExisting(OperationContext* txn, const char* filename);
+ Status openExisting(OperationContext* opCtx, const char* filename);
/** creates if DNE */
- void open(OperationContext* txn,
+ void open(OperationContext* opCtx,
const char* filename,
int requestedDataSize = 0,
bool preallocateOnly = false);
@@ -209,12 +209,12 @@ public:
/**
* Must be called before destruction.
*/
- void close(OperationContext* txn) {
- LockMongoFilesExclusive lock(txn);
- mmf.close(txn);
+ void close(OperationContext* opCtx) {
+ LockMongoFilesExclusive lock(opCtx);
+ mmf.close(opCtx);
}
- DiskLoc allocExtentArea(OperationContext* txn, int size);
+ DiskLoc allocExtentArea(OperationContext* opCtx, int size);
DataFileHeader* getHeader() {
return header();
diff --git a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
index e1bc51d29f3..ab7dca95ff9 100644
--- a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
@@ -81,12 +81,12 @@ void DataFileSync::run() {
break;
}
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
Date_t start = jsTime();
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
dur::notifyPreDataFileFlush();
- int numFiles = storageEngine->flushAllFiles(txn.get(), true);
+ int numFiles = storageEngine->flushAllFiles(opCtx.get(), true);
dur::notifyPostDataFileFlush();
time_flushing = durationCount<Milliseconds>(jsTime() - start);
@@ -100,7 +100,7 @@ void DataFileSync::run() {
}
}
-BSONObj DataFileSync::generateSection(OperationContext* txn,
+BSONObj DataFileSync::generateSection(OperationContext* opCtx,
const BSONElement& configElement) const {
if (!running()) {
return BSONObj();
diff --git a/src/mongo/db/storage/mmap_v1/data_file_sync.h b/src/mongo/db/storage/mmap_v1/data_file_sync.h
index b204fdad019..a26624f2c41 100644
--- a/src/mongo/db/storage/mmap_v1/data_file_sync.h
+++ b/src/mongo/db/storage/mmap_v1/data_file_sync.h
@@ -49,7 +49,8 @@ public:
void run();
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const;
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const;
private:
void _flushed(int ms);
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index 1ed496c3a6d..599e397f944 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -165,7 +165,8 @@ public:
return true;
}
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const {
if (!getDur().isDurable()) {
return BSONObj();
}
@@ -193,18 +194,18 @@ public:
virtual bool waitUntilDurable() {
return false;
}
- virtual bool commitNow(OperationContext* txn) {
+ virtual bool commitNow(OperationContext* opCtx) {
return false;
}
virtual bool commitIfNeeded() {
return false;
}
- virtual void syncDataAndTruncateJournal(OperationContext* txn) {}
+ virtual void syncDataAndTruncateJournal(OperationContext* opCtx) {}
virtual bool isDurable() const {
return false;
}
virtual void closingFileNotification() {}
- virtual void commitAndStopDurThread(OperationContext* txn) {}
+ virtual void commitAndStopDurThread(OperationContext* opCtx) {}
};
@@ -219,14 +220,14 @@ public:
virtual void declareWriteIntents(const std::vector<std::pair<void*, unsigned>>& intents);
virtual void createdFile(const std::string& filename, unsigned long long len);
virtual bool waitUntilDurable();
- virtual bool commitNow(OperationContext* txn);
+ virtual bool commitNow(OperationContext* opCtx);
virtual bool commitIfNeeded();
- virtual void syncDataAndTruncateJournal(OperationContext* txn);
+ virtual void syncDataAndTruncateJournal(OperationContext* opCtx);
virtual bool isDurable() const {
return true;
}
virtual void closingFileNotification();
- virtual void commitAndStopDurThread(OperationContext* txn);
+ virtual void commitAndStopDurThread(OperationContext* opCtx);
void start(ClockSource* cs, int64_t serverStartMs);
@@ -318,7 +319,7 @@ void debugValidateFileMapsMatch(const DurableMappedFile* mmf) {
/**
* Main code of the remap private view function.
*/
-void remapPrivateViewImpl(OperationContext* txn, double fraction) {
+void remapPrivateViewImpl(OperationContext* opCtx, double fraction) {
LOG(4) << "journal REMAPPRIVATEVIEW" << endl;
// There is no way that the set of files can change while we are in this method, because
@@ -335,9 +336,9 @@ void remapPrivateViewImpl(OperationContext* txn, double fraction) {
// See SERVER-5680 to see why this code is necessary on Windows.
// See SERVER-8795 to see why this code is necessary on Solaris.
#if defined(_WIN32) || defined(__sun)
- LockMongoFilesExclusive lk(txn);
+ LockMongoFilesExclusive lk(opCtx);
#else
- LockMongoFilesShared lk(txn);
+ LockMongoFilesShared lk(opCtx);
#endif
std::set<MongoFile*>& files = MongoFile::getAllFiles();
@@ -381,7 +382,7 @@ void remapPrivateViewImpl(OperationContext* txn, double fraction) {
}
if (mmf->willNeedRemap()) {
- mmf->remapThePrivateView(txn);
+ mmf->remapThePrivateView(opCtx);
}
i++;
@@ -517,10 +518,10 @@ DurableInterface::~DurableInterface() {}
// DurableImpl
//
-bool DurableImpl::commitNow(OperationContext* txn) {
+bool DurableImpl::commitNow(OperationContext* opCtx) {
CommitNotifier::When when = commitNotify.now();
- AutoYieldFlushLockForMMAPV1Commit flushLockYield(txn->lockState());
+ AutoYieldFlushLockForMMAPV1Commit flushLockYield(opCtx->lockState());
// There is always just one waiting anyways
flushRequested.notify_one();
@@ -562,15 +563,15 @@ bool DurableImpl::commitIfNeeded() {
return true;
}
-void DurableImpl::syncDataAndTruncateJournal(OperationContext* txn) {
- invariant(txn->lockState()->isW());
+void DurableImpl::syncDataAndTruncateJournal(OperationContext* opCtx) {
+ invariant(opCtx->lockState()->isW());
// Once this returns, all the outstanding journal has been applied to the data files and
// so it's safe to do the flushAll/journalCleanup below.
- commitNow(txn);
+ commitNow(opCtx);
// Flush the shared view to disk.
- MongoFile::flushAll(txn, true);
+ MongoFile::flushAll(opCtx, true);
// Once the shared view has been flushed, we do not need the journal files anymore.
journalCleanup(true);
@@ -588,7 +589,7 @@ void DurableImpl::closingFileNotification() {
}
}
-void DurableImpl::commitAndStopDurThread(OperationContext* txn) {
+void DurableImpl::commitAndStopDurThread(OperationContext* opCtx) {
CommitNotifier::When when = commitNotify.now();
// There is always just one waiting anyways
@@ -600,7 +601,7 @@ void DurableImpl::commitAndStopDurThread(OperationContext* txn) {
applyToDataFilesNotify.waitFor(when);
// Flush the shared view to disk.
- MongoFile::flushAll(txn, true);
+ MongoFile::flushAll(opCtx, true);
// Once the shared view has been flushed, we do not need the journal files anymore.
journalCleanup(true);
@@ -630,14 +631,14 @@ void DurableImpl::start(ClockSource* cs, int64_t serverStartMs) {
* @param fraction Value between (0, 1] indicating what fraction of the memory to remap.
* Remapping too much or too frequently incurs copy-on-write page fault cost.
*/
-static void remapPrivateView(OperationContext* txn, double fraction) {
+static void remapPrivateView(OperationContext* opCtx, double fraction) {
// Remapping private views must occur after WRITETODATAFILES otherwise we wouldn't see any
// newly written data on reads.
invariant(!commitJob.hasWritten());
try {
Timer t;
- remapPrivateViewImpl(txn, fraction);
+ remapPrivateViewImpl(opCtx, fraction);
stats.curr()->_remapPrivateViewMicros += t.micros();
LOG(4) << "remapPrivateView end";
@@ -725,9 +726,9 @@ static void durThread(ClockSource* cs, int64_t serverStartMs) {
Timer t;
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
- AutoAcquireFlushLockForMMAPV1Commit autoFlushLock(txn.lockState());
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
+ AutoAcquireFlushLockForMMAPV1Commit autoFlushLock(opCtx.lockState());
// We need to snapshot the commitNumber after the flush lock has been obtained,
// because at this point we know that we have a stable snapshot of the data.
@@ -828,7 +829,7 @@ static void durThread(ClockSource* cs, int64_t serverStartMs) {
// accessing it. Technically this step could be avoided on systems, which
// support atomic remap.
autoFlushLock.upgradeFlushLockToExclusive();
- remapPrivateView(txnPtr.get(), remapFraction);
+ remapPrivateView(opCtxPtr.get(), remapFraction);
autoFlushLock.release();
diff --git a/src/mongo/db/storage/mmap_v1/dur.h b/src/mongo/db/storage/mmap_v1/dur.h
index e4aec954749..b505de833f6 100644
--- a/src/mongo/db/storage/mmap_v1/dur.h
+++ b/src/mongo/db/storage/mmap_v1/dur.h
@@ -86,7 +86,7 @@ public:
@return true if --dur is on.
@return false if --dur is off. (in which case there is action)
*/
- virtual bool commitNow(OperationContext* txn) = 0;
+ virtual bool commitNow(OperationContext* opCtx) = 0;
/** Commit if enough bytes have been modified. Current threshold is 50MB
@@ -112,7 +112,7 @@ public:
*
* Must be called under the global X lock.
*/
- virtual void commitAndStopDurThread(OperationContext* txn) = 0;
+ virtual void commitAndStopDurThread(OperationContext* opCtx) = 0;
/**
* Commits pending changes, flushes all changes to main data files, then removes the
@@ -125,7 +125,7 @@ public:
* through recovery and be applied to files that have had changes made after this call
* applied.
*/
- virtual void syncDataAndTruncateJournal(OperationContext* txn) = 0;
+ virtual void syncDataAndTruncateJournal(OperationContext* opCtx) = 0;
virtual bool isDurable() const = 0;
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp b/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp
index 5c9fe117d52..8e7ef2a0cb5 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp
@@ -56,14 +56,14 @@ namespace {
* (2) TODO should we do this using N threads? Would be quite easy see Hackenberg paper table
* 5 and 6. 2 threads might be a good balance.
*/
-void WRITETODATAFILES(OperationContext* txn,
+void WRITETODATAFILES(OperationContext* opCtx,
const JSectHeader& h,
const AlignedBuilder& uncompressed) {
Timer t;
LOG(4) << "WRITETODATAFILES BEGIN";
- RecoveryJob::get().processSection(txn, &h, uncompressed.buf(), uncompressed.len(), NULL);
+ RecoveryJob::get().processSection(opCtx, &h, uncompressed.buf(), uncompressed.len(), NULL);
const long long m = t.micros();
stats.curr()->_writeToDataFilesMicros += m;
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index cdd3d4e3db2..7a2b05a379f 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -268,23 +268,23 @@ RecoveryJob::~RecoveryJob() {
invariant(!"RecoveryJob is intentionally leaked with a bare call to operator new()");
}
-void RecoveryJob::close(OperationContext* txn) {
+void RecoveryJob::close(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mx);
- _close(txn);
+ _close(opCtx);
}
-void RecoveryJob::_close(OperationContext* txn) {
- MongoFile::flushAll(txn, true);
- LockMongoFilesExclusive lock(txn);
+void RecoveryJob::_close(OperationContext* opCtx) {
+ MongoFile::flushAll(opCtx, true);
+ LockMongoFilesExclusive lock(opCtx);
for (auto& durFile : _mmfs) {
- durFile->close(txn);
+ durFile->close(opCtx);
}
_mmfs.clear();
}
-RecoveryJob::Last::Last(OperationContext* txn) : _txn(txn), mmf(NULL), fileNo(-1) {
+RecoveryJob::Last::Last(OperationContext* opCtx) : _opCtx(opCtx), mmf(NULL), fileNo(-1) {
// Make sure the files list does not change from underneath
- LockMongoFilesShared::assertAtLeastReadLocked(txn);
+ LockMongoFilesShared::assertAtLeastReadLocked(opCtx);
}
DurableMappedFile* RecoveryJob::Last::newEntry(const dur::ParsedJournalEntry& entry,
@@ -296,7 +296,7 @@ DurableMappedFile* RecoveryJob::Last::newEntry(const dur::ParsedJournalEntry& en
string fn = fileName(entry.dbName, num);
MongoFile* file;
{
- MongoFileFinder finder(_txn); // must release lock before creating new DurableMappedFile
+ MongoFileFinder finder(_opCtx); // must release lock before creating new DurableMappedFile
file = finder.findByPath(fn);
}
@@ -308,8 +308,8 @@ DurableMappedFile* RecoveryJob::Last::newEntry(const dur::ParsedJournalEntry& en
log() << "journal error applying writes, file " << fn << " is not open" << endl;
verify(false);
}
- std::shared_ptr<DurableMappedFile> sp(new DurableMappedFile(_txn));
- verify(sp->open(_txn, fn));
+ std::shared_ptr<DurableMappedFile> sp(new DurableMappedFile(_opCtx));
+ verify(sp->open(_opCtx, fn));
rj._mmfs.push_back(sp);
mmf = sp.get();
}
@@ -363,14 +363,14 @@ void RecoveryJob::applyEntry(Last& last, const ParsedJournalEntry& entry, bool a
}
if (apply) {
if (entry.op->needFilesClosed()) {
- _close(last.txn()); // locked in processSection
+ _close(last.opCtx()); // locked in processSection
}
entry.op->replay();
}
}
}
-void RecoveryJob::applyEntries(OperationContext* txn, const vector<ParsedJournalEntry>& entries) {
+void RecoveryJob::applyEntries(OperationContext* opCtx, const vector<ParsedJournalEntry>& entries) {
const bool apply = (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalScanOnly) == 0;
const bool dump = (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalDumpJournal);
@@ -378,7 +378,7 @@ void RecoveryJob::applyEntries(OperationContext* txn, const vector<ParsedJournal
log() << "BEGIN section" << endl;
}
- Last last(txn);
+ Last last(opCtx);
for (vector<ParsedJournalEntry>::const_iterator i = entries.begin(); i != entries.end(); ++i) {
applyEntry(last, *i, apply, dump);
}
@@ -388,12 +388,12 @@ void RecoveryJob::applyEntries(OperationContext* txn, const vector<ParsedJournal
}
}
-void RecoveryJob::processSection(OperationContext* txn,
+void RecoveryJob::processSection(OperationContext* opCtx,
const JSectHeader* h,
const void* p,
unsigned len,
const JSectFooter* f) {
- LockMongoFilesShared lkFiles(txn); // for RecoveryJob::Last
+ LockMongoFilesShared lkFiles(opCtx); // for RecoveryJob::Last
stdx::lock_guard<stdx::mutex> lk(_mx);
if (_recovering) {
@@ -467,14 +467,14 @@ void RecoveryJob::processSection(OperationContext* txn,
}
// got all the entries for one group commit. apply them:
- applyEntries(txn, entries);
+ applyEntries(opCtx, entries);
}
/** apply a specific journal file, that is already mmap'd
@param p start of the memory mapped file
@return true if this is detected to be the last file (ends abruptly)
*/
-bool RecoveryJob::processFileBuffer(OperationContext* txn, const void* p, unsigned len) {
+bool RecoveryJob::processFileBuffer(OperationContext* opCtx, const void* p, unsigned len) {
try {
unsigned long long fileId;
BufReader br(p, len);
@@ -529,7 +529,8 @@ bool RecoveryJob::processFileBuffer(OperationContext* txn, const void* p, unsign
const char* hdr = (const char*)br.skip(h.sectionLenWithPadding());
const char* data = hdr + sizeof(JSectHeader);
const char* footer = data + dataLen;
- processSection(txn, (const JSectHeader*)hdr, data, dataLen, (const JSectFooter*)footer);
+ processSection(
+ opCtx, (const JSectHeader*)hdr, data, dataLen, (const JSectFooter*)footer);
// ctrl c check
uassert(ErrorCodes::Interrupted,
@@ -550,7 +551,7 @@ bool RecoveryJob::processFileBuffer(OperationContext* txn, const void* p, unsign
}
/** apply a specific journal file */
-bool RecoveryJob::processFile(OperationContext* txn, boost::filesystem::path journalfile) {
+bool RecoveryJob::processFile(OperationContext* opCtx, boost::filesystem::path journalfile) {
log() << "recover " << journalfile.string() << endl;
try {
@@ -564,20 +565,20 @@ bool RecoveryJob::processFile(OperationContext* txn, boost::filesystem::path jou
log() << "recover exception checking filesize" << endl;
}
- MemoryMappedFile f{txn, MongoFile::Options::READONLY | MongoFile::Options::SEQUENTIAL};
- ON_BLOCK_EXIT([&f, &txn] {
- LockMongoFilesExclusive lock(txn);
- f.close(txn);
+ MemoryMappedFile f{opCtx, MongoFile::Options::READONLY | MongoFile::Options::SEQUENTIAL};
+ ON_BLOCK_EXIT([&f, &opCtx] {
+ LockMongoFilesExclusive lock(opCtx);
+ f.close(opCtx);
});
- void* p = f.map(txn, journalfile.string().c_str());
+ void* p = f.map(opCtx, journalfile.string().c_str());
massert(13544, str::stream() << "recover error couldn't open " << journalfile.string(), p);
- return processFileBuffer(txn, p, (unsigned)f.length());
+ return processFileBuffer(opCtx, p, (unsigned)f.length());
}
/** @param files all the j._0 style files we need to apply for recovery */
-void RecoveryJob::go(OperationContext* txn, vector<boost::filesystem::path>& files) {
+void RecoveryJob::go(OperationContext* opCtx, vector<boost::filesystem::path>& files) {
log() << "recover begin" << endl;
- LockMongoFilesExclusive lkFiles(txn); // for RecoveryJob::Last
+ LockMongoFilesExclusive lkFiles(opCtx); // for RecoveryJob::Last
_recovering = true;
// load the last sequence number synced to the datafiles on disk before the last crash
@@ -585,11 +586,11 @@ void RecoveryJob::go(OperationContext* txn, vector<boost::filesystem::path>& fil
log() << "recover lsn: " << _lastDataSyncedFromLastRun << endl;
for (unsigned i = 0; i != files.size(); ++i) {
- bool abruptEnd = processFile(txn, files[i]);
+ bool abruptEnd = processFile(opCtx, files[i]);
if (abruptEnd && i + 1 < files.size()) {
log() << "recover error: abrupt end to file " << files[i].string()
<< ", yet it isn't the last journal file" << endl;
- close(txn);
+ close(opCtx);
uasserted(13535, "recover abrupt journal file end");
}
}
@@ -600,7 +601,7 @@ void RecoveryJob::go(OperationContext* txn, vector<boost::filesystem::path>& fil
<< "Last skipped sections had sequence number " << _lastSeqSkipped;
}
- close(txn);
+ close(opCtx);
if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalScanOnly) {
uasserted(13545,
@@ -615,7 +616,7 @@ void RecoveryJob::go(OperationContext* txn, vector<boost::filesystem::path>& fil
_recovering = false;
}
-void _recover(OperationContext* txn) {
+void _recover(OperationContext* opCtx) {
verify(storageGlobalParams.dur);
boost::filesystem::path p = getJournalDir();
@@ -635,7 +636,7 @@ void _recover(OperationContext* txn) {
return;
}
- RecoveryJob::get().go(txn, journalFiles);
+ RecoveryJob::get().go(opCtx, journalFiles);
}
/** recover from a crash
@@ -645,11 +646,11 @@ void _recover(OperationContext* txn) {
void replayJournalFilesAtStartup() {
// we use a lock so that exitCleanly will wait for us
// to finish (or at least to notice what is up and stop)
- auto txn = cc().makeOperationContext();
- ScopedTransaction transaction(txn.get(), MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ auto opCtx = cc().makeOperationContext();
+ ScopedTransaction transaction(opCtx.get(), MODE_X);
+ Lock::GlobalWrite lk(opCtx->lockState());
- _recover(txn.get()); // throws on interruption
+ _recover(opCtx.get()); // throws on interruption
}
struct BufReaderY {
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.h b/src/mongo/db/storage/mmap_v1/dur_recover.h
index 9447044b607..79ce0b03e5d 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.h
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.h
@@ -56,17 +56,17 @@ public:
RecoveryJob();
~RecoveryJob();
- void go(OperationContext* txn, std::vector<boost::filesystem::path>& files);
+ void go(OperationContext* opCtx, std::vector<boost::filesystem::path>& files);
/** @param data data between header and footer. compressed if recovering. */
- void processSection(OperationContext* txn,
+ void processSection(OperationContext* opCtx,
const JSectHeader* h,
const void* data,
unsigned len,
const JSectFooter* f);
// locks and calls _close()
- void close(OperationContext* txn);
+ void close(OperationContext* opCtx);
static RecoveryJob& get() {
return _instance;
@@ -75,16 +75,16 @@ public:
private:
class Last {
public:
- Last(OperationContext* txn);
+ Last(OperationContext* opCtx);
DurableMappedFile* newEntry(const ParsedJournalEntry&, RecoveryJob&);
- OperationContext* txn() {
- return _txn;
+ OperationContext* opCtx() {
+ return _opCtx;
}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
DurableMappedFile* mmf;
std::string dbName;
int fileNo;
@@ -93,10 +93,10 @@ private:
void write(Last& last, const ParsedJournalEntry& entry); // actually writes to the file
void applyEntry(Last& last, const ParsedJournalEntry& entry, bool apply, bool dump);
- void applyEntries(OperationContext* txn, const std::vector<ParsedJournalEntry>& entries);
- bool processFileBuffer(OperationContext* txn, const void*, unsigned len);
- bool processFile(OperationContext* txn, boost::filesystem::path journalfile);
- void _close(OperationContext* txn); // doesn't lock
+ void applyEntries(OperationContext* opCtx, const std::vector<ParsedJournalEntry>& entries);
+ bool processFileBuffer(OperationContext* opCtx, const void*, unsigned len);
+ bool processFile(OperationContext* opCtx, boost::filesystem::path journalfile);
+ void _close(OperationContext* opCtx); // doesn't lock
// Set of memory mapped files and a mutex to protect them
stdx::mutex _mx;
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
index 548cb8c9f05..fd199817f11 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
@@ -61,7 +61,7 @@ using std::map;
using std::pair;
using std::string;
-void DurableMappedFile::remapThePrivateView(OperationContext* txn) {
+void DurableMappedFile::remapThePrivateView(OperationContext* opCtx) {
verify(storageGlobalParams.dur);
_willNeedRemap = false;
@@ -70,7 +70,7 @@ void DurableMappedFile::remapThePrivateView(OperationContext* txn) {
// so the remove / add isn't necessary and can be removed?
void* old = _view_private;
// privateViews.remove(_view_private);
- _view_private = remapPrivateView(txn, _view_private);
+ _view_private = remapPrivateView(opCtx, _view_private);
// privateViews.add(_view_private, this);
fassert(16112, _view_private == old);
}
@@ -241,24 +241,24 @@ void DurableMappedFile::setPath(const std::string& f) {
_p = RelativePath::fromFullPath(storageGlobalParams.dbpath, prefix);
}
-bool DurableMappedFile::open(OperationContext* txn, const std::string& fname) {
+bool DurableMappedFile::open(OperationContext* opCtx, const std::string& fname) {
LOG(3) << "mmf open " << fname;
invariant(!_view_write);
setPath(fname);
- _view_write = map(txn, fname.c_str());
+ _view_write = map(opCtx, fname.c_str());
fassert(16333, _view_write);
return finishOpening();
}
-bool DurableMappedFile::create(OperationContext* txn,
+bool DurableMappedFile::create(OperationContext* opCtx,
const std::string& fname,
unsigned long long& len) {
LOG(3) << "mmf create " << fname;
invariant(!_view_write);
setPath(fname);
- _view_write = map(txn, fname.c_str(), len);
+ _view_write = map(opCtx, fname.c_str(), len);
fassert(16332, _view_write);
return finishOpening();
}
@@ -285,7 +285,7 @@ bool DurableMappedFile::finishOpening() {
return false;
}
-void DurableMappedFile::close(OperationContext* txn) {
+void DurableMappedFile::close(OperationContext* opCtx) {
try {
LOG(3) << "mmf close " << filename();
@@ -298,14 +298,14 @@ void DurableMappedFile::close(OperationContext* txn) {
privateViews.remove(_view_private, length());
- MemoryMappedFile::close(txn);
+ MemoryMappedFile::close(opCtx);
} catch (...) {
error() << "exception in DurableMappedFile::close";
}
}
-DurableMappedFile::DurableMappedFile(OperationContext* txn, OptionSet options)
- : MemoryMappedFile(txn, options), _willNeedRemap(false) {
+DurableMappedFile::DurableMappedFile(OperationContext* opCtx, OptionSet options)
+ : MemoryMappedFile(opCtx, options), _willNeedRemap(false) {
_view_write = _view_private = 0;
}
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
index 3b9b41dab86..7050156fd25 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
@@ -51,16 +51,16 @@ protected:
}
public:
- explicit DurableMappedFile(OperationContext* txn, OptionSet options = NONE);
+ explicit DurableMappedFile(OperationContext* opCtx, OptionSet options = NONE);
virtual ~DurableMappedFile();
/**
* Callers must be holding a `LockMongoFilesExclusive`.
*/
- virtual void close(OperationContext* txn);
+ virtual void close(OperationContext* opCtx);
/** @return true if opened ok. */
- bool open(OperationContext* txn, const std::string& fname);
+ bool open(OperationContext* opCtx, const std::string& fname);
/** @return file length */
unsigned long long length() const {
@@ -79,7 +79,7 @@ public:
passed length.
@return true for ok
*/
- bool create(OperationContext* txn, const std::string& fname, unsigned long long& len);
+ bool create(OperationContext* opCtx, const std::string& fname, unsigned long long& len);
/* Get the "standard" view (which is the private one).
@return the private view.
@@ -123,7 +123,7 @@ public:
_willNeedRemap = true;
}
- void remapThePrivateView(OperationContext* txn);
+ void remapThePrivateView(OperationContext* opCtx);
virtual bool isDurableMappedFile() {
return true;
diff --git a/src/mongo/db/storage/mmap_v1/extent_manager.h b/src/mongo/db/storage/mmap_v1/extent_manager.h
index 1ca0ab7b9f1..6b0e18c44f3 100644
--- a/src/mongo/db/storage/mmap_v1/extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/extent_manager.h
@@ -77,18 +77,18 @@ public:
virtual ~ExtentManager() {}
- virtual void close(OperationContext* txn) = 0;
+ virtual void close(OperationContext* opCtx) = 0;
/**
* opens all current files
*/
- virtual Status init(OperationContext* txn) = 0;
+ virtual Status init(OperationContext* opCtx) = 0;
virtual int numFiles() const = 0;
virtual long long fileSize() const = 0;
// must call Extent::reuse on the returned extent
- virtual DiskLoc allocateExtent(OperationContext* txn,
+ virtual DiskLoc allocateExtent(OperationContext* opCtx,
bool capped,
int size,
bool enforceQuota) = 0;
@@ -96,13 +96,13 @@ public:
/**
* firstExt has to be == lastExt or a chain
*/
- virtual void freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt) = 0;
+ virtual void freeExtents(OperationContext* opCtx, DiskLoc firstExt, DiskLoc lastExt) = 0;
/**
* frees a single extent
* ignores all fields in the Extent except: magic, myLoc, length
*/
- virtual void freeExtent(OperationContext* txn, DiskLoc extent) = 0;
+ virtual void freeExtent(OperationContext* opCtx, DiskLoc extent) = 0;
/**
* Retrieve statistics on the the free list managed by this ExtentManger.
@@ -110,7 +110,7 @@ public:
* @param totalFreeSizeBytes - non-null pointer to an int64_t receiving the total free
* space in the free list.
*/
- virtual void freeListStats(OperationContext* txn,
+ virtual void freeListStats(OperationContext* opCtx,
int* numExtents,
int64_t* totalFreeSizeBytes) const = 0;
@@ -188,8 +188,8 @@ public:
*/
virtual CacheHint* cacheHint(const DiskLoc& extentLoc, const HintType& hint) = 0;
- virtual DataFileVersion getFileFormat(OperationContext* txn) const = 0;
- virtual void setFileFormat(OperationContext* txn, DataFileVersion newVersion) = 0;
+ virtual DataFileVersion getFileFormat(OperationContext* opCtx) const = 0;
+ virtual void setFileFormat(OperationContext* opCtx, DataFileVersion newVersion) = 0;
virtual const DataFile* getOpenFile(int n) const = 0;
};
diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
index eaf0981c688..bf761f2f2f0 100644
--- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
+++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
@@ -42,7 +42,7 @@
namespace mongo {
-RecordData HeapRecordStoreBtree::dataFor(OperationContext* txn, const RecordId& loc) const {
+RecordData HeapRecordStoreBtree::dataFor(OperationContext* opCtx, const RecordId& loc) const {
Records::const_iterator it = _records.find(loc);
invariant(it != _records.end());
const MmapV1RecordHeader& rec = it->second;
@@ -50,7 +50,7 @@ RecordData HeapRecordStoreBtree::dataFor(OperationContext* txn, const RecordId&
return RecordData(rec.data.get(), rec.dataSize);
}
-bool HeapRecordStoreBtree::findRecord(OperationContext* txn,
+bool HeapRecordStoreBtree::findRecord(OperationContext* opCtx,
const RecordId& loc,
RecordData* out) const {
Records::const_iterator it = _records.find(loc);
@@ -61,11 +61,11 @@ bool HeapRecordStoreBtree::findRecord(OperationContext* txn,
return true;
}
-void HeapRecordStoreBtree::deleteRecord(OperationContext* txn, const RecordId& loc) {
+void HeapRecordStoreBtree::deleteRecord(OperationContext* opCtx, const RecordId& loc) {
invariant(_records.erase(loc) == 1);
}
-StatusWith<RecordId> HeapRecordStoreBtree::insertRecord(OperationContext* txn,
+StatusWith<RecordId> HeapRecordStoreBtree::insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) {
@@ -75,12 +75,12 @@ StatusWith<RecordId> HeapRecordStoreBtree::insertRecord(OperationContext* txn,
const RecordId loc = allocateLoc();
_records[loc] = rec;
- HeapRecordStoreBtreeRecoveryUnit::notifyInsert(txn, this, loc);
+ HeapRecordStoreBtreeRecoveryUnit::notifyInsert(opCtx, this, loc);
return StatusWith<RecordId>(loc);
}
-Status HeapRecordStoreBtree::insertRecordsWithDocWriter(OperationContext* txn,
+Status HeapRecordStoreBtree::insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut) {
@@ -96,7 +96,7 @@ Status HeapRecordStoreBtree::insertRecordsWithDocWriter(OperationContext* txn,
_records[loc] = rec;
*idsOut = loc;
- HeapRecordStoreBtreeRecoveryUnit::notifyInsert(txn, this, loc);
+ HeapRecordStoreBtreeRecoveryUnit::notifyInsert(opCtx, this, loc);
return Status::OK();
}
@@ -111,7 +111,7 @@ RecordId HeapRecordStoreBtree::allocateLoc() {
return dl;
}
-Status HeapRecordStoreBtree::touch(OperationContext* txn, BSONObjBuilder* output) const {
+Status HeapRecordStoreBtree::touch(OperationContext* opCtx, BSONObjBuilder* output) const {
// not currently called from the tests, but called from btree_logic.h
return Status::OK();
}
diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
index e01b85db55c..07583680edf 100644
--- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
+++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
@@ -49,33 +49,33 @@ public:
// RecordId(0,0) isn't valid for records.
explicit HeapRecordStoreBtree(StringData ns) : RecordStore(ns), _nextId(1) {}
- virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const;
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const;
- virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* out) const;
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* out) const;
- virtual void deleteRecord(OperationContext* txn, const RecordId& dl);
+ virtual void deleteRecord(OperationContext* opCtx, const RecordId& dl);
- virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota);
- virtual Status insertRecordsWithDocWriter(OperationContext* txn,
+ virtual Status insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut);
- virtual long long numRecords(OperationContext* txn) const {
+ virtual long long numRecords(OperationContext* opCtx) const {
return _records.size();
}
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
+ virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const;
// public methods below here are not necessary to test btree, and will crash when called.
// ------------------------------
- virtual Status updateRecord(OperationContext* txn,
+ virtual Status updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int len,
@@ -88,7 +88,7 @@ public:
return true;
}
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn,
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
@@ -96,17 +96,17 @@ public:
invariant(false);
}
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final {
invariant(false);
}
- virtual Status truncate(OperationContext* txn) {
+ virtual Status truncate(OperationContext* opCtx) {
invariant(false);
}
- virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
invariant(false);
}
@@ -114,7 +114,7 @@ public:
invariant(false);
}
- virtual Status validate(OperationContext* txn,
+ virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
@@ -122,23 +122,23 @@ public:
invariant(false);
}
- virtual void appendCustomStats(OperationContext* txn,
+ virtual void appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const {
invariant(false);
}
- virtual void increaseStorageSize(OperationContext* txn, int size, bool enforceQuota) {
+ virtual void increaseStorageSize(OperationContext* opCtx, int size, bool enforceQuota) {
invariant(false);
}
- virtual int64_t storageSize(OperationContext* txn,
+ virtual int64_t storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo = NULL,
int infoLevel = 0) const {
invariant(false);
}
- virtual long long dataSize(OperationContext* txn) const {
+ virtual long long dataSize(OperationContext* opCtx) const {
invariant(false);
}
@@ -154,11 +154,11 @@ public:
invariant(false);
}
- void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const override {
+ void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override {
invariant(false);
}
- virtual void updateStatsAfterRepair(OperationContext* txn,
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize) {
invariant(false);
diff --git a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
index 279abae4a38..e4f5452e123 100644
--- a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
+++ b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
@@ -84,7 +84,7 @@ public:
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
diff --git a/src/mongo/db/storage/mmap_v1/mmap.cpp b/src/mongo/db/storage/mmap_v1/mmap.cpp
index bdce2bd6468..f8d12295ce3 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap.cpp
@@ -69,18 +69,18 @@ map<string, MongoFile*> pathToFile;
mongo::AtomicUInt64 mmfNextId(0);
} // namespace
-MemoryMappedFile::MemoryMappedFile(OperationContext* txn, OptionSet options)
+MemoryMappedFile::MemoryMappedFile(OperationContext* opCtx, OptionSet options)
: MongoFile(options), _uniqueId(mmfNextId.fetchAndAdd(1)) {
- created(txn);
+ created(opCtx);
}
MemoryMappedFile::~MemoryMappedFile() {
invariant(isClosed());
- auto txn = cc().getOperationContext();
- invariant(txn);
+ auto opCtx = cc().getOperationContext();
+ invariant(opCtx);
- LockMongoFilesShared lock(txn);
+ LockMongoFilesShared lock(opCtx);
for (std::set<MongoFile*>::const_iterator it = mmfiles.begin(); it != mmfiles.end(); it++) {
invariant(*it != this);
}
@@ -88,14 +88,14 @@ MemoryMappedFile::~MemoryMappedFile() {
/*static*/ AtomicUInt64 MemoryMappedFile::totalMappedLength;
-void* MemoryMappedFile::create(OperationContext* txn,
+void* MemoryMappedFile::create(OperationContext* opCtx,
const std::string& filename,
unsigned long long len,
bool zero) {
uassert(13468,
string("can't create file already exists ") + filename,
!boost::filesystem::exists(filename));
- void* p = map(txn, filename.c_str(), len);
+ void* p = map(opCtx, filename.c_str(), len);
fassert(16331, p);
if (zero) {
size_t sz = (size_t)len;
@@ -113,7 +113,7 @@ void* MemoryMappedFile::create(OperationContext* txn,
length = l;
}
-void* MemoryMappedFile::map(OperationContext* txn, const char* filename) {
+void* MemoryMappedFile::map(OperationContext* opCtx, const char* filename) {
unsigned long long l;
try {
l = boost::filesystem::file_size(filename);
@@ -125,7 +125,7 @@ void* MemoryMappedFile::map(OperationContext* txn, const char* filename) {
<< e.what());
}
- void* ret = map(txn, filename, l);
+ void* ret = map(opCtx, filename, l);
fassert(16334, ret);
return ret;
}
@@ -150,14 +150,14 @@ set<MongoFile*>& MongoFile::getAllFiles() {
safe to call more than once, albeit might be wasted work
ideal to call close to the close, if the close is well before object destruction
*/
-void MongoFile::destroyed(OperationContext* txn) {
- LockMongoFilesShared::assertExclusivelyLocked(txn);
+void MongoFile::destroyed(OperationContext* opCtx) {
+ LockMongoFilesShared::assertExclusivelyLocked(opCtx);
mmfiles.erase(this);
pathToFile.erase(filename());
}
/*static*/
-void MongoFile::closeAllFiles(OperationContext* txn, stringstream& message) {
+void MongoFile::closeAllFiles(OperationContext* opCtx, stringstream& message) {
static int closingAllFiles = 0;
if (closingAllFiles) {
message << "warning closingAllFiles=" << closingAllFiles << endl;
@@ -165,26 +165,26 @@ void MongoFile::closeAllFiles(OperationContext* txn, stringstream& message) {
}
++closingAllFiles;
- LockMongoFilesExclusive lk(txn);
+ LockMongoFilesExclusive lk(opCtx);
ProgressMeter pm(mmfiles.size(), 2, 1, "files", "File Closing Progress");
set<MongoFile*> temp = mmfiles;
for (set<MongoFile*>::iterator i = temp.begin(); i != temp.end(); i++) {
- (*i)->close(txn); // close() now removes from mmfiles
+ (*i)->close(opCtx); // close() now removes from mmfiles
pm.hit();
}
message << "closeAllFiles() finished";
--closingAllFiles;
}
-/*static*/ int MongoFile::flushAll(OperationContext* txn, bool sync) {
- return _flushAll(txn, sync);
+/*static*/ int MongoFile::flushAll(OperationContext* opCtx, bool sync) {
+ return _flushAll(opCtx, sync);
}
-/*static*/ int MongoFile::_flushAll(OperationContext* txn, bool sync) {
+/*static*/ int MongoFile::_flushAll(OperationContext* opCtx, bool sync) {
if (!sync) {
int num = 0;
- LockMongoFilesShared lk(txn);
+ LockMongoFilesShared lk(opCtx);
for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++) {
num++;
MongoFile* mmf = *i;
@@ -204,7 +204,7 @@ void MongoFile::closeAllFiles(OperationContext* txn, stringstream& message) {
OwnedPointerVector<Flushable> thingsToFlushWrapper;
vector<Flushable*>& thingsToFlush = thingsToFlushWrapper.mutableVector();
{
- LockMongoFilesShared lk(txn);
+ LockMongoFilesShared lk(opCtx);
for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++) {
MongoFile* mmf = *i;
if (!mmf)
@@ -214,22 +214,22 @@ void MongoFile::closeAllFiles(OperationContext* txn, stringstream& message) {
}
for (size_t i = 0; i < thingsToFlush.size(); i++) {
- thingsToFlush[i]->flush(txn);
+ thingsToFlush[i]->flush(opCtx);
}
return thingsToFlush.size();
}
-void MongoFile::created(OperationContext* txn) {
+void MongoFile::created(OperationContext* opCtx) {
// If we're a READONLY mapping, we don't want to ever flush.
if (!isOptionSet(READONLY)) {
- LockMongoFilesExclusive lk(txn);
+ LockMongoFilesExclusive lk(opCtx);
mmfiles.insert(this);
}
}
-void MongoFile::setFilename(OperationContext* txn, const std::string& fn) {
- LockMongoFilesExclusive lk(txn);
+void MongoFile::setFilename(OperationContext* opCtx, const std::string& fn) {
+ LockMongoFilesExclusive lk(opCtx);
verify(_filename.empty());
_filename = boost::filesystem::absolute(fn).generic_string();
MongoFile*& ptf = pathToFile[_filename];
diff --git a/src/mongo/db/storage/mmap_v1/mmap.h b/src/mongo/db/storage/mmap_v1/mmap.h
index fc28d56e1d9..b1b32e37d7d 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.h
+++ b/src/mongo/db/storage/mmap_v1/mmap.h
@@ -71,17 +71,17 @@ class LockMongoFilesShared {
Lock::SharedLock lk;
public:
- explicit LockMongoFilesShared(OperationContext* txn) : lk(txn->lockState(), mmmutex) {
+ explicit LockMongoFilesShared(OperationContext* opCtx) : lk(opCtx->lockState(), mmmutex) {
// JS worker threads may not have cc() setup, as they work on behalf of other clients
- dassert(txn == cc().getOperationContext() || !cc().getOperationContext());
+ dassert(opCtx == cc().getOperationContext() || !cc().getOperationContext());
}
- static void assertExclusivelyLocked(OperationContext* txn) {
- invariant(mmmutex.isExclusivelyLocked(txn->lockState()));
+ static void assertExclusivelyLocked(OperationContext* opCtx) {
+ invariant(mmmutex.isExclusivelyLocked(opCtx->lockState()));
}
- static void assertAtLeastReadLocked(OperationContext* txn) {
- invariant(mmmutex.isAtLeastReadLocked(txn->lockState()));
+ static void assertAtLeastReadLocked(OperationContext* opCtx) {
+ invariant(mmmutex.isAtLeastReadLocked(opCtx->lockState()));
}
/** era changes anytime memory maps come and go. thus you can use this as a cheap way to check
@@ -99,10 +99,10 @@ class LockMongoFilesExclusive {
Lock::ExclusiveLock lk;
public:
- explicit LockMongoFilesExclusive(OperationContext* txn)
- : lk(txn->lockState(), LockMongoFilesShared::mmmutex) {
+ explicit LockMongoFilesExclusive(OperationContext* opCtx)
+ : lk(opCtx->lockState(), LockMongoFilesShared::mmmutex) {
// JS worker threads may not have cc() setup, as they work on behalf of other clients
- dassert(txn == cc().getOperationContext() || !cc().getOperationContext());
+ dassert(opCtx == cc().getOperationContext() || !cc().getOperationContext());
LockMongoFilesShared::era++;
}
};
@@ -116,7 +116,7 @@ public:
class Flushable {
public:
virtual ~Flushable() {}
- virtual void flush(OperationContext* txn) = 0;
+ virtual void flush(OperationContext* opCtx) = 0;
};
enum Options {
@@ -135,7 +135,7 @@ public:
called from within a mutex that MongoFile uses. so be careful not to deadlock.
*/
template <class F>
- static void forEach(OperationContext* txn, F fun);
+ static void forEach(OperationContext* opCtx, F fun);
/**
* note: you need to be in mmmutex when using this. forEach (above) handles that for you
@@ -143,8 +143,8 @@ public:
*/
static std::set<MongoFile*>& getAllFiles();
- static int flushAll(OperationContext* txn, bool sync); // returns n flushed
- static void closeAllFiles(OperationContext* txn, std::stringstream& message);
+ static int flushAll(OperationContext* opCtx, bool sync); // returns n flushed
+ static void closeAllFiles(OperationContext* opCtx, std::stringstream& message);
virtual bool isDurableMappedFile() {
return false;
@@ -153,20 +153,20 @@ public:
std::string filename() const {
return _filename;
}
- void setFilename(OperationContext* txn, const std::string& fn);
+ void setFilename(OperationContext* opCtx, const std::string& fn);
virtual uint64_t getUniqueId() const = 0;
private:
std::string _filename;
- static int _flushAll(OperationContext* txn, bool sync); // returns n flushed
+ static int _flushAll(OperationContext* opCtx, bool sync); // returns n flushed
const OptionSet _options;
protected:
/**
* Implementations may assume this is called from within `LockMongoFilesExclusive`.
*/
- virtual void close(OperationContext* txn) = 0;
+ virtual void close(OperationContext* opCtx) = 0;
virtual void flush(bool sync) = 0;
/**
* returns a thread safe object that you can call flush on
@@ -179,7 +179,7 @@ protected:
*/
virtual bool isClosed() = 0;
- void created(OperationContext* txn); /* subclass must call after create */
+ void created(OperationContext* opCtx); /* subclass must call after create */
/**
* Implementations may assume this is called from within `LockMongoFilesExclusive`.
@@ -189,7 +189,7 @@ protected:
* safe to call more than once, albeit might be wasted work
* ideal to call close to the close, if the close is well before object destruction
*/
- void destroyed(OperationContext* txn);
+ void destroyed(OperationContext* opCtx);
virtual unsigned long long length() const = 0;
@@ -208,7 +208,7 @@ class MongoFileFinder {
MONGO_DISALLOW_COPYING(MongoFileFinder);
public:
- MongoFileFinder(OperationContext* txn) : _lk(txn) {}
+ MongoFileFinder(OperationContext* opCtx) : _lk(opCtx) {}
/** @return The MongoFile object associated with the specified file name. If no file is open
with the specified name, returns null.
@@ -229,25 +229,25 @@ protected:
}
public:
- MemoryMappedFile(OperationContext* txn, OptionSet options = NONE);
+ MemoryMappedFile(OperationContext* opCtx, OptionSet options = NONE);
virtual ~MemoryMappedFile();
/**
* Callers must be holding a `LockMongoFilesExclusive`.
*/
- virtual void close(OperationContext* txn);
+ virtual void close(OperationContext* opCtx);
/**
* uasserts if file doesn't exist. fasserts on mmap error.
*/
- void* map(OperationContext* txn, const char* filename);
+ void* map(OperationContext* opCtx, const char* filename);
/**
* uasserts if file exists. fasserts on mmap error.
* @param zero fill file with zeros when true
*/
- void* create(OperationContext* txn,
+ void* create(OperationContext* opCtx,
const std::string& filename,
unsigned long long len,
bool zero);
@@ -307,18 +307,18 @@ protected:
* Creates with length if DNE, otherwise validates input length. Returns nullptr on mmap
* error.
*/
- void* map(OperationContext* txn, const char* filename, unsigned long long& length);
+ void* map(OperationContext* opCtx, const char* filename, unsigned long long& length);
/**
* Close the current private view and open a new replacement. Returns nullptr on mmap error.
*/
- void* remapPrivateView(OperationContext* txn, void* oldPrivateAddr);
+ void* remapPrivateView(OperationContext* opCtx, void* oldPrivateAddr);
};
/** p is called from within a mutex that MongoFile uses. so be careful not to deadlock. */
template <class F>
-inline void MongoFile::forEach(OperationContext* txn, F p) {
- LockMongoFilesShared lklk(txn);
+inline void MongoFile::forEach(OperationContext* opCtx, F p) {
+ LockMongoFilesShared lklk(opCtx);
const std::set<MongoFile*>& mmfiles = MongoFile::getAllFiles();
for (std::set<MongoFile*>::const_iterator i = mmfiles.begin(); i != mmfiles.end(); i++)
p(*i);
diff --git a/src/mongo/db/storage/mmap_v1/mmap_posix.cpp b/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
index 02589421b44..2a9c1cc0458 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
@@ -79,7 +79,7 @@ static size_t fetchMinOSPageSizeBytes() {
const size_t g_minOSPageSizeBytes = fetchMinOSPageSizeBytes();
-void MemoryMappedFile::close(OperationContext* txn) {
+void MemoryMappedFile::close(OperationContext* opCtx) {
for (vector<void*>::iterator i = views.begin(); i != views.end(); i++) {
munmap(*i, len);
}
@@ -91,7 +91,7 @@ void MemoryMappedFile::close(OperationContext* txn) {
::close(fd);
fd = 0;
}
- destroyed(txn); // cleans up from the master list of mmaps
+ destroyed(opCtx); // cleans up from the master list of mmaps
}
#ifndef O_NOATIME
@@ -159,11 +159,11 @@ MAdvise::~MAdvise() {
}
#endif
-void* MemoryMappedFile::map(OperationContext* txn,
+void* MemoryMappedFile::map(OperationContext* opCtx,
const char* filename,
unsigned long long& length) {
// length may be updated by callee.
- setFilename(txn, filename);
+ setFilename(opCtx, filename);
FileAllocator::get()->allocateAsap(filename, length);
const bool readOnly = isOptionSet(READONLY);
@@ -243,9 +243,9 @@ void* MemoryMappedFile::createPrivateMap() {
return x;
}
-void* MemoryMappedFile::remapPrivateView(OperationContext* txn, void* oldPrivateAddr) {
+void* MemoryMappedFile::remapPrivateView(OperationContext* opCtx, void* oldPrivateAddr) {
#if defined(__sun) // SERVER-8795
- LockMongoFilesExclusive lockMongoFiles(txn);
+ LockMongoFilesExclusive lockMongoFiles(opCtx);
#endif
// don't unmap, just mmap over the old region
@@ -288,7 +288,7 @@ public:
PosixFlushable(MemoryMappedFile* theFile, void* view, HANDLE fd, long len)
: _theFile(theFile), _view(view), _fd(fd), _len(len), _id(_theFile->getUniqueId()) {}
- void flush(OperationContext* txn) {
+ void flush(OperationContext* opCtx) {
if (_view == NULL || _fd == 0)
return;
@@ -303,7 +303,7 @@ public:
}
// some error, lets see if we're supposed to exist
- LockMongoFilesShared mmfilesLock(txn);
+ LockMongoFilesShared mmfilesLock(opCtx);
std::set<MongoFile*> mmfs = MongoFile::getAllFiles();
std::set<MongoFile*>::const_iterator it = mmfs.find(_theFile);
if ((it == mmfs.end()) || ((*it)->getUniqueId() != _id)) {
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index 69978fb4b53..c88ed7545c4 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -155,7 +155,7 @@ private:
Entry* const _cachedEntry;
};
-MMAPV1DatabaseCatalogEntry::MMAPV1DatabaseCatalogEntry(OperationContext* txn,
+MMAPV1DatabaseCatalogEntry::MMAPV1DatabaseCatalogEntry(OperationContext* opCtx,
StringData name,
StringData path,
bool directoryPerDB,
@@ -163,32 +163,32 @@ MMAPV1DatabaseCatalogEntry::MMAPV1DatabaseCatalogEntry(OperationContext* txn,
std::unique_ptr<ExtentManager> extentManager)
: DatabaseCatalogEntry(name),
_path(path.toString()),
- _namespaceIndex(txn, _path, name.toString()),
+ _namespaceIndex(opCtx, _path, name.toString()),
_extentManager(std::move(extentManager)) {
ScopeGuard onErrorClose = MakeGuard([&] {
- _namespaceIndex.close(txn);
- _extentManager->close(txn);
+ _namespaceIndex.close(opCtx);
+ _extentManager->close(opCtx);
});
massert(34469,
str::stream() << name << " is not a valid database name",
NamespaceString::validDBName(name));
- invariant(txn->lockState()->isDbLockedForMode(name, MODE_X));
+ invariant(opCtx->lockState()->isDbLockedForMode(name, MODE_X));
try {
// First init the .ns file. If this fails, we may leak the .ns file, but this is OK
// because subsequent openDB will go through this code path again.
- _namespaceIndex.init(txn);
+ _namespaceIndex.init(opCtx);
// Initialize the extent manager. This will create the first data file (.0) if needed
// and if this fails we would leak the .ns file above. Leaking the .ns or .0 file is
// acceptable, because subsequent openDB calls will exercise the code path again.
- Status s = _extentManager->init(txn);
+ Status s = _extentManager->init(opCtx);
if (!s.isOK()) {
msgasserted(16966, str::stream() << "_extentManager->init failed: " << s.toString());
}
// This is the actual loading of the on-disk structures into cache.
- _init(txn);
+ _init(opCtx);
} catch (const DBException& dbe) {
warning() << "database " << path << " " << name
<< " could not be opened due to DBException " << dbe.getCode() << ": "
@@ -230,8 +230,8 @@ void MMAPV1DatabaseCatalogEntry::_removeFromCache(RecoveryUnit* ru, StringData n
_collections.erase(i);
}
-Status MMAPV1DatabaseCatalogEntry::dropCollection(OperationContext* txn, StringData ns) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+Status MMAPV1DatabaseCatalogEntry::dropCollection(OperationContext* opCtx, StringData ns) {
+ invariant(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_X));
NamespaceDetails* details = _namespaceIndex.details(ns);
@@ -242,28 +242,28 @@ Status MMAPV1DatabaseCatalogEntry::dropCollection(OperationContext* txn, StringD
invariant(details->nIndexes == 0); // TODO: delete instead?
invariant(details->indexBuildsInProgress == 0); // TODO: delete instead?
- _removeNamespaceFromNamespaceCollection(txn, ns);
- _removeFromCache(txn->recoveryUnit(), ns);
+ _removeNamespaceFromNamespaceCollection(opCtx, ns);
+ _removeFromCache(opCtx->recoveryUnit(), ns);
// free extents
if (!details->firstExtent.isNull()) {
- _extentManager->freeExtents(txn, details->firstExtent, details->lastExtent);
- *txn->recoveryUnit()->writing(&details->firstExtent) = DiskLoc().setInvalid();
- *txn->recoveryUnit()->writing(&details->lastExtent) = DiskLoc().setInvalid();
+ _extentManager->freeExtents(opCtx, details->firstExtent, details->lastExtent);
+ *opCtx->recoveryUnit()->writing(&details->firstExtent) = DiskLoc().setInvalid();
+ *opCtx->recoveryUnit()->writing(&details->lastExtent) = DiskLoc().setInvalid();
}
// remove from the catalog hashtable
- _namespaceIndex.kill_ns(txn, ns);
+ _namespaceIndex.kill_ns(opCtx, ns);
return Status::OK();
}
-Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
+Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp) {
- Status s = _renameSingleNamespace(txn, fromNS, toNS, stayTemp);
+ Status s = _renameSingleNamespace(opCtx, fromNS, toNS, stayTemp);
if (!s.isOK())
return s;
@@ -271,7 +271,7 @@ Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
invariant(details);
RecordStoreV1Base* systemIndexRecordStore = _getIndexRecordStore();
- auto cursor = systemIndexRecordStore->getCursor(txn);
+ auto cursor = systemIndexRecordStore->getCursor(opCtx);
while (auto record = cursor->next()) {
BSONObj oldIndexSpec = record->data.releaseToBson();
if (fromNS != oldIndexSpec["ns"].valuestrsafe())
@@ -292,7 +292,7 @@ Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
}
StatusWith<RecordId> newIndexSpecLoc = systemIndexRecordStore->insertRecord(
- txn, newIndexSpec.objdata(), newIndexSpec.objsize(), false);
+ opCtx, newIndexSpec.objdata(), newIndexSpec.objsize(), false);
if (!newIndexSpecLoc.isOK())
return newIndexSpecLoc.getStatus();
@@ -300,10 +300,10 @@ Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
{
// Fix the IndexDetails pointer.
- int indexI = getCollectionCatalogEntry(toNS)->_findIndexNumber(txn, indexName);
+ int indexI = getCollectionCatalogEntry(toNS)->_findIndexNumber(opCtx, indexName);
IndexDetails& indexDetails = details->idx(indexI);
- *txn->recoveryUnit()->writing(&indexDetails.info) =
+ *opCtx->recoveryUnit()->writing(&indexDetails.info) =
DiskLoc::fromRecordId(newIndexSpecLoc.getValue());
}
@@ -312,21 +312,21 @@ Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
std::string oldIndexNs = IndexDescriptor::makeIndexNamespace(fromNS, indexName);
std::string newIndexNs = IndexDescriptor::makeIndexNamespace(toNS, indexName);
- Status s = _renameSingleNamespace(txn, oldIndexNs, newIndexNs, false);
+ Status s = _renameSingleNamespace(opCtx, oldIndexNs, newIndexNs, false);
if (!s.isOK())
return s;
}
// Invalidate index record for the old collection.
invalidateSystemCollectionRecord(
- txn, NamespaceString(name(), "system.indexes"), record->id);
+ opCtx, NamespaceString(name(), "system.indexes"), record->id);
- systemIndexRecordStore->deleteRecord(txn, record->id);
+ systemIndexRecordStore->deleteRecord(opCtx, record->id);
}
return Status::OK();
}
-Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace(OperationContext* txn,
+Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp) {
@@ -345,20 +345,20 @@ Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace(OperationContext* txn,
// ----
// this could throw, but if it does we're ok
- _namespaceIndex.add_ns(txn, toNS, fromDetails);
+ _namespaceIndex.add_ns(opCtx, toNS, fromDetails);
NamespaceDetails* toDetails = _namespaceIndex.details(toNS);
try {
- toDetails->copyingFrom(txn, toNS, _namespaceIndex, fromDetails); // fixes extraOffset
+ toDetails->copyingFrom(opCtx, toNS, _namespaceIndex, fromDetails); // fixes extraOffset
} catch (DBException&) {
// could end up here if .ns is full - if so try to clean up / roll back a little
- _namespaceIndex.kill_ns(txn, toNS);
+ _namespaceIndex.kill_ns(opCtx, toNS);
throw;
}
// at this point, code .ns stuff moved
- _namespaceIndex.kill_ns(txn, fromNS);
+ _namespaceIndex.kill_ns(opCtx, fromNS);
fromDetails = NULL;
// fix system.namespaces
@@ -366,7 +366,8 @@ Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace(OperationContext* txn,
RecordId oldSpecLocation = getCollectionCatalogEntry(fromNS)->getNamespacesRecordId();
invariant(!oldSpecLocation.isNull());
{
- BSONObj oldSpec = _getNamespaceRecordStore()->dataFor(txn, oldSpecLocation).releaseToBson();
+ BSONObj oldSpec =
+ _getNamespaceRecordStore()->dataFor(opCtx, oldSpecLocation).releaseToBson();
invariant(!oldSpec.isEmpty());
BSONObjBuilder b;
@@ -383,33 +384,34 @@ Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace(OperationContext* txn,
newSpec = b.obj();
}
- RecordId rid = _addNamespaceToNamespaceCollection(txn, toNS, newSpec.isEmpty() ? 0 : &newSpec);
+ RecordId rid =
+ _addNamespaceToNamespaceCollection(opCtx, toNS, newSpec.isEmpty() ? 0 : &newSpec);
// Invalidate old namespace record
invalidateSystemCollectionRecord(
- txn, NamespaceString(name(), "system.namespaces"), oldSpecLocation);
+ opCtx, NamespaceString(name(), "system.namespaces"), oldSpecLocation);
- _getNamespaceRecordStore()->deleteRecord(txn, oldSpecLocation);
+ _getNamespaceRecordStore()->deleteRecord(opCtx, oldSpecLocation);
Entry*& entry = _collections[toNS.toString()];
invariant(entry == NULL);
- txn->recoveryUnit()->registerChange(new EntryInsertion(toNS, this));
+ opCtx->recoveryUnit()->registerChange(new EntryInsertion(toNS, this));
entry = new Entry();
- _removeFromCache(txn->recoveryUnit(), fromNS);
- _insertInCache(txn, toNS, rid, entry);
+ _removeFromCache(opCtx->recoveryUnit(), fromNS);
+ _insertInCache(opCtx, toNS, rid, entry);
return Status::OK();
}
void MMAPV1DatabaseCatalogEntry::invalidateSystemCollectionRecord(
- OperationContext* txn, NamespaceString systemCollectionNamespace, RecordId record) {
+ OperationContext* opCtx, NamespaceString systemCollectionNamespace, RecordId record) {
// Having to go back up through the DatabaseHolder is a bit of a layering
// violation, but at this point we're not going to add more MMAPv1 specific interfaces.
StringData dbName = systemCollectionNamespace.db();
- invariant(txn->lockState()->isDbLockedForMode(dbName, MODE_X));
- Database* db = dbHolder().get(txn, dbName);
+ invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X));
+ Database* db = dbHolder().get(opCtx, dbName);
Collection* systemCollection = db->getCollection(systemCollectionNamespace);
- systemCollection->getCursorManager()->invalidateDocument(txn, record, INVALIDATION_DELETION);
+ systemCollection->getCursorManager()->invalidateDocument(opCtx, record, INVALIDATION_DELETION);
}
void MMAPV1DatabaseCatalogEntry::appendExtraStats(OperationContext* opCtx,
@@ -491,7 +493,7 @@ void MMAPV1DatabaseCatalogEntry::getCollectionNamespaces(std::list<std::string>*
_namespaceIndex.getCollectionNamespaces(tofill);
}
-void MMAPV1DatabaseCatalogEntry::_ensureSystemCollection(OperationContext* txn, StringData ns) {
+void MMAPV1DatabaseCatalogEntry::_ensureSystemCollection(OperationContext* opCtx, StringData ns) {
NamespaceDetails* details = _namespaceIndex.details(ns);
if (details) {
return;
@@ -502,14 +504,14 @@ void MMAPV1DatabaseCatalogEntry::_ensureSystemCollection(OperationContext* txn,
fassertFailed(34372);
}
- _namespaceIndex.add_ns(txn, ns, DiskLoc(), false);
+ _namespaceIndex.add_ns(opCtx, ns, DiskLoc(), false);
}
-void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
+void MMAPV1DatabaseCatalogEntry::_init(OperationContext* opCtx) {
// We wrap the WUOW in an optional as we can't create it if we are in RO mode.
boost::optional<WriteUnitOfWork> wunit;
if (!storageGlobalParams.readOnly) {
- wunit.emplace(txn);
+ wunit.emplace(opCtx);
}
// Upgrade freelist
@@ -524,13 +526,13 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
if (!freeListDetails->firstExtent.isNull()) {
_extentManager->freeExtents(
- txn, freeListDetails->firstExtent, freeListDetails->lastExtent);
+ opCtx, freeListDetails->firstExtent, freeListDetails->lastExtent);
}
- _namespaceIndex.kill_ns(txn, oldFreeList.ns());
+ _namespaceIndex.kill_ns(opCtx, oldFreeList.ns());
}
- DataFileVersion version = _extentManager->getFileFormat(txn);
+ DataFileVersion version = _extentManager->getFileFormat(opCtx);
if (version.isCompatibleWithCurrentCode().isOK() && !version.mayHave30Freelist()) {
if (storageGlobalParams.readOnly) {
severe() << "Legacy storage format detected, but server was started with the "
@@ -540,7 +542,7 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
// Any DB that can be opened and written to gets this flag set.
version.setMayHave30Freelist();
- _extentManager->setFileFormat(txn, version);
+ _extentManager->setFileFormat(opCtx, version);
}
const NamespaceString nsi(name(), "system.indexes");
@@ -549,16 +551,16 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
bool isSystemNamespacesGoingToBeNew = _namespaceIndex.details(nsn.toString()) == NULL;
bool isSystemIndexesGoingToBeNew = _namespaceIndex.details(nsi.toString()) == NULL;
- _ensureSystemCollection(txn, nsn.toString());
- _ensureSystemCollection(txn, nsi.toString());
+ _ensureSystemCollection(opCtx, nsn.toString());
+ _ensureSystemCollection(opCtx, nsi.toString());
if (isSystemNamespacesGoingToBeNew) {
invariant(!storageGlobalParams.readOnly);
- txn->recoveryUnit()->registerChange(new EntryInsertion(nsn.toString(), this));
+ opCtx->recoveryUnit()->registerChange(new EntryInsertion(nsn.toString(), this));
}
if (isSystemIndexesGoingToBeNew) {
invariant(!storageGlobalParams.readOnly);
- txn->recoveryUnit()->registerChange(new EntryInsertion(nsi.toString(), this));
+ opCtx->recoveryUnit()->registerChange(new EntryInsertion(nsi.toString(), this));
}
Entry*& indexEntry = _collections[nsi.toString()];
@@ -578,7 +580,7 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
NamespaceDetailsRSV1MetaData* md =
new NamespaceDetailsRSV1MetaData(nsn.toString(), nsDetails);
nsEntry->recordStore.reset(
- new SimpleRecordStoreV1(txn, nsn.toString(), md, _extentManager.get(), false));
+ new SimpleRecordStoreV1(opCtx, nsn.toString(), md, _extentManager.get(), false));
}
if (!indexEntry) {
@@ -588,12 +590,12 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
new NamespaceDetailsRSV1MetaData(nsi.toString(), indexDetails);
indexEntry->recordStore.reset(
- new SimpleRecordStoreV1(txn, nsi.toString(), md, _extentManager.get(), true));
+ new SimpleRecordStoreV1(opCtx, nsi.toString(), md, _extentManager.get(), true));
}
RecordId indexNamespaceId;
if (isSystemIndexesGoingToBeNew) {
- indexNamespaceId = _addNamespaceToNamespaceCollection(txn, nsi.toString(), NULL);
+ indexNamespaceId = _addNamespaceToNamespaceCollection(opCtx, nsi.toString(), NULL);
}
if (!nsEntry->catalogEntry) {
@@ -625,7 +627,7 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
RecordStoreV1Base* rs = _getNamespaceRecordStore();
invariant(rs);
- auto cursor = rs->getCursor(txn);
+ auto cursor = rs->getCursor(opCtx);
while (auto record = cursor->next()) {
auto ns = record->data.releaseToBson()["name"].String();
Entry*& entry = _collections[ns];
@@ -635,7 +637,7 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
// because they don't have indexes on them anyway.
if (entry) {
if (entry->catalogEntry->getNamespacesRecordId().isNull()) {
- entry->catalogEntry->setNamespacesRecordId(txn, record->id);
+ entry->catalogEntry->setNamespacesRecordId(opCtx, record->id);
} else {
invariant(entry->catalogEntry->getNamespacesRecordId() == record->id);
}
@@ -643,11 +645,11 @@ void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
}
entry = new Entry();
- _insertInCache(txn, ns, record->id, entry);
+ _insertInCache(opCtx, ns, record->id, entry);
}
}
-Status MMAPV1DatabaseCatalogEntry::createCollection(OperationContext* txn,
+Status MMAPV1DatabaseCatalogEntry::createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options,
bool allocateDefaultSpace) {
@@ -657,36 +659,36 @@ Status MMAPV1DatabaseCatalogEntry::createCollection(OperationContext* txn,
}
BSONObj optionsAsBSON = options.toBSON();
- RecordId rid = _addNamespaceToNamespaceCollection(txn, ns, &optionsAsBSON);
+ RecordId rid = _addNamespaceToNamespaceCollection(opCtx, ns, &optionsAsBSON);
- _namespaceIndex.add_ns(txn, ns, DiskLoc(), options.capped);
+ _namespaceIndex.add_ns(opCtx, ns, DiskLoc(), options.capped);
NamespaceDetails* details = _namespaceIndex.details(ns);
// Set the flags.
- NamespaceDetailsRSV1MetaData(ns, details).replaceUserFlags(txn, options.flags);
+ NamespaceDetailsRSV1MetaData(ns, details).replaceUserFlags(opCtx, options.flags);
if (options.capped && options.cappedMaxDocs > 0) {
- txn->recoveryUnit()->writingInt(details->maxDocsInCapped) = options.cappedMaxDocs;
+ opCtx->recoveryUnit()->writingInt(details->maxDocsInCapped) = options.cappedMaxDocs;
}
Entry*& entry = _collections[ns.toString()];
invariant(!entry);
- txn->recoveryUnit()->registerChange(new EntryInsertion(ns, this));
+ opCtx->recoveryUnit()->registerChange(new EntryInsertion(ns, this));
entry = new Entry();
- _insertInCache(txn, ns, rid, entry);
+ _insertInCache(opCtx, ns, rid, entry);
if (allocateDefaultSpace) {
RecordStoreV1Base* rs = _getRecordStore(ns);
if (options.initialNumExtents > 0) {
int size = _massageExtentSize(_extentManager.get(), options.cappedSize);
for (int i = 0; i < options.initialNumExtents; i++) {
- rs->increaseStorageSize(txn, size, false);
+ rs->increaseStorageSize(opCtx, size, false);
}
} else if (!options.initialExtentSizes.empty()) {
for (size_t i = 0; i < options.initialExtentSizes.size(); i++) {
int size = options.initialExtentSizes[i];
size = _massageExtentSize(_extentManager.get(), size);
- rs->increaseStorageSize(txn, size, false);
+ rs->increaseStorageSize(opCtx, size, false);
}
} else if (options.capped) {
// normal
@@ -694,34 +696,34 @@ Status MMAPV1DatabaseCatalogEntry::createCollection(OperationContext* txn,
// Must do this at least once, otherwise we leave the collection with no
// extents, which is invalid.
int sz = _massageExtentSize(_extentManager.get(),
- options.cappedSize - rs->storageSize(txn));
+ options.cappedSize - rs->storageSize(opCtx));
sz &= 0xffffff00;
- rs->increaseStorageSize(txn, sz, false);
- } while (rs->storageSize(txn) < options.cappedSize);
+ rs->increaseStorageSize(opCtx, sz, false);
+ } while (rs->storageSize(opCtx) < options.cappedSize);
} else {
- rs->increaseStorageSize(txn, _extentManager->initialSize(128), false);
+ rs->increaseStorageSize(opCtx, _extentManager->initialSize(128), false);
}
}
if (!options.collation.isEmpty()) {
- markCollationFeatureAsInUse(txn);
+ markCollationFeatureAsInUse(opCtx);
}
return Status::OK();
}
-void MMAPV1DatabaseCatalogEntry::createNamespaceForIndex(OperationContext* txn, StringData name) {
+void MMAPV1DatabaseCatalogEntry::createNamespaceForIndex(OperationContext* opCtx, StringData name) {
// This is a simplified form of createCollection.
invariant(!_namespaceIndex.details(name));
- RecordId rid = _addNamespaceToNamespaceCollection(txn, name, NULL);
- _namespaceIndex.add_ns(txn, name, DiskLoc(), false);
+ RecordId rid = _addNamespaceToNamespaceCollection(opCtx, name, NULL);
+ _namespaceIndex.add_ns(opCtx, name, DiskLoc(), false);
Entry*& entry = _collections[name.toString()];
invariant(!entry);
- txn->recoveryUnit()->registerChange(new EntryInsertion(name, this));
+ opCtx->recoveryUnit()->registerChange(new EntryInsertion(name, this));
entry = new Entry();
- _insertInCache(txn, name, rid, entry);
+ _insertInCache(opCtx, name, rid, entry);
}
NamespaceDetailsCollectionCatalogEntry* MMAPV1DatabaseCatalogEntry::getCollectionCatalogEntry(
@@ -735,7 +737,7 @@ NamespaceDetailsCollectionCatalogEntry* MMAPV1DatabaseCatalogEntry::getCollectio
return i->second->catalogEntry.get();
}
-void MMAPV1DatabaseCatalogEntry::_insertInCache(OperationContext* txn,
+void MMAPV1DatabaseCatalogEntry::_insertInCache(OperationContext* opCtx,
StringData ns,
RecordId rid,
Entry* entry) {
@@ -750,10 +752,10 @@ void MMAPV1DatabaseCatalogEntry::_insertInCache(OperationContext* txn,
if (details->isCapped) {
entry->recordStore.reset(new CappedRecordStoreV1(
- txn, NULL, ns, md.release(), _extentManager.get(), nss.coll() == "system.indexes"));
+ opCtx, NULL, ns, md.release(), _extentManager.get(), nss.coll() == "system.indexes"));
} else {
entry->recordStore.reset(new SimpleRecordStoreV1(
- txn, ns, md.release(), _extentManager.get(), nss.coll() == "system.indexes"));
+ opCtx, ns, md.release(), _extentManager.get(), nss.coll() == "system.indexes"));
}
}
@@ -771,7 +773,7 @@ RecordStoreV1Base* MMAPV1DatabaseCatalogEntry::_getRecordStore(StringData ns) co
return i->second->recordStore.get();
}
-IndexAccessMethod* MMAPV1DatabaseCatalogEntry::getIndex(OperationContext* txn,
+IndexAccessMethod* MMAPV1DatabaseCatalogEntry::getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* entry) {
const std::string& type = entry->descriptor()->getAccessMethodName();
@@ -828,7 +830,7 @@ RecordStoreV1Base* MMAPV1DatabaseCatalogEntry::_getNamespaceRecordStore() const
return i->second->recordStore.get();
}
-RecordId MMAPV1DatabaseCatalogEntry::_addNamespaceToNamespaceCollection(OperationContext* txn,
+RecordId MMAPV1DatabaseCatalogEntry::_addNamespaceToNamespaceCollection(OperationContext* opCtx,
StringData ns,
const BSONObj* options) {
if (nsToCollectionSubstring(ns) == "system.namespaces") {
@@ -847,12 +849,12 @@ RecordId MMAPV1DatabaseCatalogEntry::_addNamespaceToNamespaceCollection(Operatio
RecordStoreV1Base* rs = _getNamespaceRecordStore();
invariant(rs);
- StatusWith<RecordId> loc = rs->insertRecord(txn, obj.objdata(), obj.objsize(), false);
+ StatusWith<RecordId> loc = rs->insertRecord(opCtx, obj.objdata(), obj.objsize(), false);
massertStatusOK(loc.getStatus());
return loc.getValue();
}
-void MMAPV1DatabaseCatalogEntry::_removeNamespaceFromNamespaceCollection(OperationContext* txn,
+void MMAPV1DatabaseCatalogEntry::_removeNamespaceFromNamespaceCollection(OperationContext* opCtx,
StringData ns) {
if (nsToCollectionSubstring(ns) == "system.namespaces") {
// system.namespaces holds all the others, so it is not explicitly listed in the catalog.
@@ -870,12 +872,12 @@ void MMAPV1DatabaseCatalogEntry::_removeNamespaceFromNamespaceCollection(Operati
// Invalidate old namespace record
RecordId oldSpecLocation = entry->second->catalogEntry->getNamespacesRecordId();
invalidateSystemCollectionRecord(
- txn, NamespaceString(name(), "system.namespaces"), oldSpecLocation);
+ opCtx, NamespaceString(name(), "system.namespaces"), oldSpecLocation);
- rs->deleteRecord(txn, oldSpecLocation);
+ rs->deleteRecord(opCtx, oldSpecLocation);
}
-CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationContext* txn,
+CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationContext* opCtx,
StringData ns) const {
if (nsToCollectionSubstring(ns) == "system.namespaces") {
return {};
@@ -886,10 +888,10 @@ CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationCont
return {};
}
- return getCollectionOptions(txn, entry->second->catalogEntry->getNamespacesRecordId());
+ return getCollectionOptions(opCtx, entry->second->catalogEntry->getNamespacesRecordId());
}
-CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationContext* txn,
+CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationContext* opCtx,
RecordId rid) const {
CollectionOptions options;
@@ -901,7 +903,7 @@ CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationCont
invariant(rs);
RecordData data;
- invariant(rs->findRecord(txn, rid, &data));
+ invariant(rs->findRecord(opCtx, rid, &data));
if (data.releaseToBson()["options"].isABSONObj()) {
Status status = options.parse(data.releaseToBson()["options"].Obj());
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
index ea4342bb868..67e562d4fe2 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
@@ -53,7 +53,7 @@ class OperationContext;
class MMAPV1DatabaseCatalogEntry : public DatabaseCatalogEntry {
public:
- MMAPV1DatabaseCatalogEntry(OperationContext* txn,
+ MMAPV1DatabaseCatalogEntry(OperationContext* opCtx,
StringData name,
StringData path,
bool directoryperdb,
@@ -65,9 +65,9 @@ public:
/**
* Must be called before destruction.
*/
- virtual void close(OperationContext* txn) {
- _extentManager->close(txn);
- _namespaceIndex.close(txn);
+ virtual void close(OperationContext* opCtx) {
+ _extentManager->close(opCtx);
+ _namespaceIndex.close(opCtx);
}
// these two seem the same and yet different
@@ -98,14 +98,14 @@ public:
virtual void appendExtraStats(OperationContext* opCtx, BSONObjBuilder* out, double scale) const;
- Status createCollection(OperationContext* txn,
+ Status createCollection(OperationContext* opCtx,
StringData ns,
const CollectionOptions& options,
bool allocateDefaultSpace);
- Status dropCollection(OperationContext* txn, StringData ns);
+ Status dropCollection(OperationContext* opCtx, StringData ns);
- Status renameCollection(OperationContext* txn,
+ Status renameCollection(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp);
@@ -119,7 +119,7 @@ public:
RecordStore* getRecordStore(StringData ns) const;
- IndexAccessMethod* getIndex(OperationContext* txn,
+ IndexAccessMethod* getIndex(OperationContext* opCtx,
const CollectionCatalogEntry* collection,
IndexCatalogEntry* index);
@@ -130,17 +130,17 @@ public:
return _extentManager.get();
}
- CollectionOptions getCollectionOptions(OperationContext* txn, StringData ns) const;
+ CollectionOptions getCollectionOptions(OperationContext* opCtx, StringData ns) const;
- CollectionOptions getCollectionOptions(OperationContext* txn, RecordId nsRid) const;
+ CollectionOptions getCollectionOptions(OperationContext* opCtx, RecordId nsRid) const;
/**
* Creates a CollectionCatalogEntry in the form of an index rather than a collection.
* MMAPv1 puts both indexes and collections into CCEs. A namespace named 'name' must not
* exist.
*/
- void createNamespaceForIndex(OperationContext* txn, StringData name);
- static void invalidateSystemCollectionRecord(OperationContext* txn,
+ void createNamespaceForIndex(OperationContext* opCtx, StringData name);
+ static void invalidateSystemCollectionRecord(OperationContext* opCtx,
NamespaceString systemCollectionNamespace,
RecordId record);
@@ -172,20 +172,20 @@ private:
RecordStoreV1Base* _getNamespaceRecordStore() const;
RecordStoreV1Base* _getRecordStore(StringData ns) const;
- RecordId _addNamespaceToNamespaceCollection(OperationContext* txn,
+ RecordId _addNamespaceToNamespaceCollection(OperationContext* opCtx,
StringData ns,
const BSONObj* options);
- void _removeNamespaceFromNamespaceCollection(OperationContext* txn, StringData ns);
+ void _removeNamespaceFromNamespaceCollection(OperationContext* opCtx, StringData ns);
- Status _renameSingleNamespace(OperationContext* txn,
+ Status _renameSingleNamespace(OperationContext* opCtx,
StringData fromNS,
StringData toNS,
bool stayTemp);
- void _ensureSystemCollection(OperationContext* txn, StringData ns);
+ void _ensureSystemCollection(OperationContext* opCtx, StringData ns);
- void _init(OperationContext* txn);
+ void _init(OperationContext* opCtx);
/**
* Populate the _collections cache.
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
index 36af8f3f06a..5a784b25dc7 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
@@ -305,24 +305,24 @@ DatabaseCatalogEntry* MMAPV1Engine::getDatabaseCatalogEntry(OperationContext* op
return entry;
}
-Status MMAPV1Engine::closeDatabase(OperationContext* txn, StringData db) {
+Status MMAPV1Engine::closeDatabase(OperationContext* opCtx, StringData db) {
// Before the files are closed, flush any potentially outstanding changes, which might
// reference this database. Otherwise we will assert when subsequent applications of the
// global journal entries occur, which happen to have write intents for the removed files.
- getDur().syncDataAndTruncateJournal(txn);
+ getDur().syncDataAndTruncateJournal(opCtx);
stdx::lock_guard<stdx::mutex> lk(_entryMapMutex);
MMAPV1DatabaseCatalogEntry* entry = _entryMap[db.toString()];
if (entry) {
- entry->close(txn);
+ entry->close(opCtx);
}
delete entry;
_entryMap.erase(db.toString());
return Status::OK();
}
-Status MMAPV1Engine::dropDatabase(OperationContext* txn, StringData db) {
- Status status = closeDatabase(txn, db);
+Status MMAPV1Engine::dropDatabase(OperationContext* opCtx, StringData db) {
+ Status status = closeDatabase(opCtx, db);
if (!status.isOK())
return status;
@@ -350,15 +350,15 @@ void MMAPV1Engine::_listDatabases(const std::string& directory, std::vector<std:
}
}
-int MMAPV1Engine::flushAllFiles(OperationContext* txn, bool sync) {
- return MongoFile::flushAll(txn, sync);
+int MMAPV1Engine::flushAllFiles(OperationContext* opCtx, bool sync) {
+ return MongoFile::flushAll(opCtx, sync);
}
-Status MMAPV1Engine::beginBackup(OperationContext* txn) {
+Status MMAPV1Engine::beginBackup(OperationContext* opCtx) {
return Status::OK();
}
-void MMAPV1Engine::endBackup(OperationContext* txn) {
+void MMAPV1Engine::endBackup(OperationContext* opCtx) {
return;
}
@@ -379,15 +379,15 @@ void MMAPV1Engine::cleanShutdown() {
// we would only hang here if the file_allocator code generates a
// synchronous signal, which we don't expect
log() << "shutdown: waiting for fs preallocator..." << endl;
- auto txn = cc().getOperationContext();
+ auto opCtx = cc().getOperationContext();
// In some cases we may shutdown early before we have any operation context yet, but we need
// one for synchronization purposes.
ServiceContext::UniqueOperationContext newTxn;
- if (!txn) {
+ if (!opCtx) {
newTxn = cc().makeOperationContext();
- txn = newTxn.get();
- invariant(txn);
+ opCtx = newTxn.get();
+ invariant(opCtx);
}
FileAllocator::get()->waitUntilFinished();
@@ -395,12 +395,12 @@ void MMAPV1Engine::cleanShutdown() {
if (storageGlobalParams.dur) {
log() << "shutdown: final commit..." << endl;
- getDur().commitAndStopDurThread(txn);
+ getDur().commitAndStopDurThread(opCtx);
}
log() << "shutdown: closing all files..." << endl;
stringstream ss3;
- MemoryMappedFile::closeAllFiles(txn, ss3);
+ MemoryMappedFile::closeAllFiles(opCtx, ss3);
log() << ss3.str() << endl;
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
index b5d19950d7b..0d7c6b3711e 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
@@ -57,9 +57,9 @@ public:
RecoveryUnit* newRecoveryUnit();
void listDatabases(std::vector<std::string>* out) const;
- int flushAllFiles(OperationContext* txn, bool sync);
- Status beginBackup(OperationContext* txn);
- void endBackup(OperationContext* txn);
+ int flushAllFiles(OperationContext* opCtx, bool sync);
+ Status beginBackup(OperationContext* opCtx);
+ void endBackup(OperationContext* opCtx);
DatabaseCatalogEntry* getDatabaseCatalogEntry(OperationContext* opCtx, StringData db);
@@ -74,19 +74,19 @@ public:
virtual bool isEphemeral() const;
- virtual Status closeDatabase(OperationContext* txn, StringData db);
+ virtual Status closeDatabase(OperationContext* opCtx, StringData db);
- virtual Status dropDatabase(OperationContext* txn, StringData db);
+ virtual Status dropDatabase(OperationContext* opCtx, StringData db);
virtual void cleanShutdown();
// Callers should use repairDatabase instead.
- virtual Status repairRecordStore(OperationContext* txn, const std::string& ns) {
+ virtual Status repairRecordStore(OperationContext* opCtx, const std::string& ns) {
return Status(ErrorCodes::InternalError, "MMAPv1 doesn't support repairRecordStore");
}
// MMAPv1 specific (non-virtual)
- Status repairDatabase(OperationContext* txn,
+ Status repairDatabase(OperationContext* opCtx,
const std::string& dbName,
bool preserveClonedFilesOnFailure,
bool backupOriginalFiles);
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 3f9b6019802..29d7952bf22 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -79,9 +79,9 @@ class MmapV1RecordFetcher : public RecordFetcher {
public:
explicit MmapV1RecordFetcher(const MmapV1RecordHeader* record) : _record(record) {}
- virtual void setup(OperationContext* txn) {
+ virtual void setup(OperationContext* opCtx) {
invariant(!_filesLock.get());
- _filesLock.reset(new LockMongoFilesShared(txn));
+ _filesLock.reset(new LockMongoFilesShared(opCtx));
}
virtual void fetch() {
@@ -138,7 +138,7 @@ boost::filesystem::path MmapV1ExtentManager::_fileName(int n) const {
}
-Status MmapV1ExtentManager::init(OperationContext* txn) {
+Status MmapV1ExtentManager::init(OperationContext* opCtx) {
invariant(_files.empty());
for (int n = 0; n < DiskLoc::MaxFiles; n++) {
@@ -172,18 +172,18 @@ Status MmapV1ExtentManager::init(OperationContext* txn) {
}
}
- unique_ptr<DataFile> df(new DataFile(txn, n));
+ unique_ptr<DataFile> df(new DataFile(opCtx, n));
- Status s = df->openExisting(txn, fullNameString.c_str());
+ Status s = df->openExisting(opCtx, fullNameString.c_str());
if (!s.isOK()) {
- df->close(txn);
+ df->close(opCtx);
return s;
}
invariant(!df->getHeader()->uninitialized());
// We only checkUpgrade on files that we are keeping, not preallocs.
- df->getHeader()->checkUpgrade(txn);
+ df->getHeader()->checkUpgrade(opCtx);
_files.push_back(df.release());
}
@@ -191,13 +191,13 @@ Status MmapV1ExtentManager::init(OperationContext* txn) {
// If this is a new database being created, instantiate the first file and one extent so
// we can have a coherent database.
if (_files.empty()) {
- WriteUnitOfWork wuow(txn);
- _createExtent(txn, initialSize(128), false);
+ WriteUnitOfWork wuow(opCtx);
+ _createExtent(opCtx, initialSize(128), false);
wuow.commit();
// Commit the journal and all changes to disk so that even if exceptions occur during
// subsequent initialization, we won't have uncommited changes during file close.
- getDur().commitNow(txn);
+ getDur().commitNow(opCtx);
}
return Status::OK();
@@ -221,12 +221,12 @@ DataFile* MmapV1ExtentManager::_getOpenFile(int fileId) {
return _files[fileId];
}
-DataFile* MmapV1ExtentManager::_addAFile(OperationContext* txn,
+DataFile* MmapV1ExtentManager::_addAFile(OperationContext* opCtx,
int sizeNeeded,
bool preallocateNextFile) {
// Database must be stable and we need to be in some sort of an update operation in order
// to add a new file.
- invariant(txn->lockState()->isDbLockedForMode(_dbname, MODE_IX));
+ invariant(opCtx->lockState()->isDbLockedForMode(_dbname, MODE_IX));
const int allocFileId = _files.size();
@@ -241,15 +241,15 @@ DataFile* MmapV1ExtentManager::_addAFile(OperationContext* txn,
}
{
- unique_ptr<DataFile> allocFile(new DataFile(txn, allocFileId));
+ unique_ptr<DataFile> allocFile(new DataFile(opCtx, allocFileId));
const string allocFileName = _fileName(allocFileId).string();
Timer t;
try {
- allocFile->open(txn, allocFileName.c_str(), minSize, false);
+ allocFile->open(opCtx, allocFileName.c_str(), minSize, false);
} catch (...) {
- allocFile->close(txn);
+ allocFile->close(opCtx);
throw;
}
if (t.seconds() > 1) {
@@ -263,13 +263,13 @@ DataFile* MmapV1ExtentManager::_addAFile(OperationContext* txn,
// Preallocate is asynchronous
if (preallocateNextFile) {
- unique_ptr<DataFile> nextFile(new DataFile(txn, allocFileId + 1));
+ unique_ptr<DataFile> nextFile(new DataFile(opCtx, allocFileId + 1));
const string nextFileName = _fileName(allocFileId + 1).string();
try {
- nextFile->open(txn, nextFileName.c_str(), minSize, false);
+ nextFile->open(opCtx, nextFileName.c_str(), minSize, false);
} catch (...) {
- nextFile->close(txn);
+ nextFile->close(opCtx);
throw;
}
}
@@ -366,26 +366,26 @@ int MmapV1ExtentManager::maxSize() const {
}
DiskLoc MmapV1ExtentManager::_createExtentInFile(
- OperationContext* txn, int fileNo, DataFile* f, int size, bool enforceQuota) {
+ OperationContext* opCtx, int fileNo, DataFile* f, int size, bool enforceQuota) {
_checkQuota(enforceQuota, fileNo - 1);
massert(10358, "bad new extent size", size >= minSize() && size <= maxSize());
- DiskLoc loc = f->allocExtentArea(txn, size);
+ DiskLoc loc = f->allocExtentArea(opCtx, size);
loc.assertOk();
Extent* e = getExtent(loc, false);
verify(e);
- *txn->recoveryUnit()->writing(&e->magic) = Extent::extentSignature;
- *txn->recoveryUnit()->writing(&e->myLoc) = loc;
- *txn->recoveryUnit()->writing(&e->length) = size;
+ *opCtx->recoveryUnit()->writing(&e->magic) = Extent::extentSignature;
+ *opCtx->recoveryUnit()->writing(&e->myLoc) = loc;
+ *opCtx->recoveryUnit()->writing(&e->length) = size;
return loc;
}
-DiskLoc MmapV1ExtentManager::_createExtent(OperationContext* txn, int size, bool enforceQuota) {
+DiskLoc MmapV1ExtentManager::_createExtent(OperationContext* opCtx, int size, bool enforceQuota) {
size = quantizeExtentSize(size);
if (size > maxSize())
@@ -398,7 +398,7 @@ DiskLoc MmapV1ExtentManager::_createExtent(OperationContext* txn, int size, bool
invariant(f);
if (f->getHeader()->unusedLength >= size) {
- return _createExtentInFile(txn, i, f, size, enforceQuota);
+ return _createExtentInFile(opCtx, i, f, size, enforceQuota);
}
}
@@ -407,10 +407,10 @@ DiskLoc MmapV1ExtentManager::_createExtent(OperationContext* txn, int size, bool
// no space in an existing file
// allocate files until we either get one big enough or hit maxSize
for (int i = 0; i < 8; i++) {
- DataFile* f = _addAFile(txn, size, false);
+ DataFile* f = _addAFile(opCtx, size, false);
if (f->getHeader()->unusedLength >= size) {
- return _createExtentInFile(txn, numFiles() - 1, f, size, enforceQuota);
+ return _createExtentInFile(opCtx, numFiles() - 1, f, size, enforceQuota);
}
}
@@ -418,7 +418,7 @@ DiskLoc MmapV1ExtentManager::_createExtent(OperationContext* txn, int size, bool
msgasserted(14810, "couldn't allocate space for a new extent");
}
-DiskLoc MmapV1ExtentManager::_allocFromFreeList(OperationContext* txn,
+DiskLoc MmapV1ExtentManager::_allocFromFreeList(OperationContext* opCtx,
int approxSize,
bool capped) {
// setup extent constraints
@@ -493,27 +493,27 @@ DiskLoc MmapV1ExtentManager::_allocFromFreeList(OperationContext* txn,
// remove from the free list
if (!best->xprev.isNull())
- *txn->recoveryUnit()->writing(&getExtent(best->xprev)->xnext) = best->xnext;
+ *opCtx->recoveryUnit()->writing(&getExtent(best->xprev)->xnext) = best->xnext;
if (!best->xnext.isNull())
- *txn->recoveryUnit()->writing(&getExtent(best->xnext)->xprev) = best->xprev;
+ *opCtx->recoveryUnit()->writing(&getExtent(best->xnext)->xprev) = best->xprev;
if (_getFreeListStart() == best->myLoc)
- _setFreeListStart(txn, best->xnext);
+ _setFreeListStart(opCtx, best->xnext);
if (_getFreeListEnd() == best->myLoc)
- _setFreeListEnd(txn, best->xprev);
+ _setFreeListEnd(opCtx, best->xprev);
return best->myLoc;
}
-DiskLoc MmapV1ExtentManager::allocateExtent(OperationContext* txn,
+DiskLoc MmapV1ExtentManager::allocateExtent(OperationContext* opCtx,
bool capped,
int size,
bool enforceQuota) {
- Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_X);
+ Lock::ResourceLock rlk(opCtx->lockState(), _rid, MODE_X);
bool fromFreeList = true;
- DiskLoc eloc = _allocFromFreeList(txn, size, capped);
+ DiskLoc eloc = _allocFromFreeList(opCtx, size, capped);
if (eloc.isNull()) {
fromFreeList = false;
- eloc = _createExtent(txn, size, enforceQuota);
+ eloc = _createExtent(opCtx, size, enforceQuota);
}
invariant(!eloc.isNull());
@@ -525,29 +525,29 @@ DiskLoc MmapV1ExtentManager::allocateExtent(OperationContext* txn,
return eloc;
}
-void MmapV1ExtentManager::freeExtent(OperationContext* txn, DiskLoc firstExt) {
- Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_X);
+void MmapV1ExtentManager::freeExtent(OperationContext* opCtx, DiskLoc firstExt) {
+ Lock::ResourceLock rlk(opCtx->lockState(), _rid, MODE_X);
Extent* e = getExtent(firstExt);
- txn->recoveryUnit()->writing(&e->xnext)->Null();
- txn->recoveryUnit()->writing(&e->xprev)->Null();
- txn->recoveryUnit()->writing(&e->firstRecord)->Null();
- txn->recoveryUnit()->writing(&e->lastRecord)->Null();
+ opCtx->recoveryUnit()->writing(&e->xnext)->Null();
+ opCtx->recoveryUnit()->writing(&e->xprev)->Null();
+ opCtx->recoveryUnit()->writing(&e->firstRecord)->Null();
+ opCtx->recoveryUnit()->writing(&e->lastRecord)->Null();
if (_getFreeListStart().isNull()) {
- _setFreeListStart(txn, firstExt);
- _setFreeListEnd(txn, firstExt);
+ _setFreeListStart(opCtx, firstExt);
+ _setFreeListEnd(opCtx, firstExt);
} else {
DiskLoc a = _getFreeListStart();
invariant(getExtent(a)->xprev.isNull());
- *txn->recoveryUnit()->writing(&getExtent(a)->xprev) = firstExt;
- *txn->recoveryUnit()->writing(&getExtent(firstExt)->xnext) = a;
- _setFreeListStart(txn, firstExt);
+ *opCtx->recoveryUnit()->writing(&getExtent(a)->xprev) = firstExt;
+ *opCtx->recoveryUnit()->writing(&getExtent(firstExt)->xnext) = a;
+ _setFreeListStart(opCtx, firstExt);
}
}
-void MmapV1ExtentManager::freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt) {
- Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_X);
+void MmapV1ExtentManager::freeExtents(OperationContext* opCtx, DiskLoc firstExt, DiskLoc lastExt) {
+ Lock::ResourceLock rlk(opCtx->lockState(), _rid, MODE_X);
if (firstExt.isNull() && lastExt.isNull())
return;
@@ -563,14 +563,14 @@ void MmapV1ExtentManager::freeExtents(OperationContext* txn, DiskLoc firstExt, D
}
if (_getFreeListStart().isNull()) {
- _setFreeListStart(txn, firstExt);
- _setFreeListEnd(txn, lastExt);
+ _setFreeListStart(opCtx, firstExt);
+ _setFreeListEnd(opCtx, lastExt);
} else {
DiskLoc a = _getFreeListStart();
invariant(getExtent(a)->xprev.isNull());
- *txn->recoveryUnit()->writing(&getExtent(a)->xprev) = lastExt;
- *txn->recoveryUnit()->writing(&getExtent(lastExt)->xnext) = a;
- _setFreeListStart(txn, firstExt);
+ *opCtx->recoveryUnit()->writing(&getExtent(a)->xprev) = lastExt;
+ *opCtx->recoveryUnit()->writing(&getExtent(lastExt)->xnext) = a;
+ _setFreeListStart(opCtx, firstExt);
}
}
@@ -588,22 +588,22 @@ DiskLoc MmapV1ExtentManager::_getFreeListEnd() const {
return file->header()->freeListEnd;
}
-void MmapV1ExtentManager::_setFreeListStart(OperationContext* txn, DiskLoc loc) {
+void MmapV1ExtentManager::_setFreeListStart(OperationContext* opCtx, DiskLoc loc) {
invariant(!_files.empty());
DataFile* file = _files[0];
- *txn->recoveryUnit()->writing(&file->header()->freeListStart) = loc;
+ *opCtx->recoveryUnit()->writing(&file->header()->freeListStart) = loc;
}
-void MmapV1ExtentManager::_setFreeListEnd(OperationContext* txn, DiskLoc loc) {
+void MmapV1ExtentManager::_setFreeListEnd(OperationContext* opCtx, DiskLoc loc) {
invariant(!_files.empty());
DataFile* file = _files[0];
- *txn->recoveryUnit()->writing(&file->header()->freeListEnd) = loc;
+ *opCtx->recoveryUnit()->writing(&file->header()->freeListEnd) = loc;
}
-void MmapV1ExtentManager::freeListStats(OperationContext* txn,
+void MmapV1ExtentManager::freeListStats(OperationContext* opCtx,
int* numExtents,
int64_t* totalFreeSizeBytes) const {
- Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_S);
+ Lock::ResourceLock rlk(opCtx->lockState(), _rid, MODE_S);
invariant(numExtents);
invariant(totalFreeSizeBytes);
@@ -644,9 +644,9 @@ MmapV1ExtentManager::FilesArray::~FilesArray() {
}
}
-void MmapV1ExtentManager::FilesArray::close(OperationContext* txn) {
+void MmapV1ExtentManager::FilesArray::close(OperationContext* opCtx) {
for (int i = 0; i < size(); i++) {
- _files[i]->close(txn);
+ _files[i]->close(opCtx);
}
}
@@ -659,7 +659,7 @@ void MmapV1ExtentManager::FilesArray::push_back(DataFile* val) {
_size.store(n + 1);
}
-DataFileVersion MmapV1ExtentManager::getFileFormat(OperationContext* txn) const {
+DataFileVersion MmapV1ExtentManager::getFileFormat(OperationContext* opCtx) const {
if (numFiles() == 0)
return DataFileVersion(0, 0);
@@ -667,12 +667,12 @@ DataFileVersion MmapV1ExtentManager::getFileFormat(OperationContext* txn) const
return _getOpenFile(0)->getHeader()->version;
}
-void MmapV1ExtentManager::setFileFormat(OperationContext* txn, DataFileVersion newVersion) {
+void MmapV1ExtentManager::setFileFormat(OperationContext* opCtx, DataFileVersion newVersion) {
invariant(numFiles() > 0);
DataFile* df = _getOpenFile(0);
invariant(df);
- *txn->recoveryUnit()->writing(&df->getHeader()->version) = newVersion;
+ *opCtx->recoveryUnit()->writing(&df->getHeader()->version) = newVersion;
}
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
index fb891ee8227..dff9de9efe9 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
@@ -92,34 +92,34 @@ public:
/**
* Must be called before destruction.
*/
- void close(OperationContext* txn) {
- _files.close(txn);
+ void close(OperationContext* opCtx) {
+ _files.close(opCtx);
}
/**
* opens all current files, not thread safe
*/
- Status init(OperationContext* txn);
+ Status init(OperationContext* opCtx);
int numFiles() const;
long long fileSize() const;
// must call Extent::reuse on the returned extent
- DiskLoc allocateExtent(OperationContext* txn, bool capped, int size, bool enforceQuota);
+ DiskLoc allocateExtent(OperationContext* opCtx, bool capped, int size, bool enforceQuota);
/**
* firstExt has to be == lastExt or a chain
*/
- void freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt);
+ void freeExtents(OperationContext* opCtx, DiskLoc firstExt, DiskLoc lastExt);
/**
* frees a single extent
* ignores all fields in the Extent except: magic, myLoc, length
*/
- void freeExtent(OperationContext* txn, DiskLoc extent);
+ void freeExtent(OperationContext* opCtx, DiskLoc extent);
- void freeListStats(OperationContext* txn, int* numExtents, int64_t* totalFreeSizeBytes) const;
+ void freeListStats(OperationContext* opCtx, int* numExtents, int64_t* totalFreeSizeBytes) const;
/**
* @param loc - has to be for a specific MmapV1RecordHeader
@@ -152,8 +152,8 @@ public:
/**
* Not thread safe, requires a database exclusive lock
*/
- DataFileVersion getFileFormat(OperationContext* txn) const final;
- void setFileFormat(OperationContext* txn, DataFileVersion newVersion) final;
+ DataFileVersion getFileFormat(OperationContext* opCtx) const final;
+ void setFileFormat(OperationContext* opCtx, DataFileVersion newVersion) final;
const DataFile* getOpenFile(int n) const final {
return _getOpenFile(n);
@@ -167,13 +167,13 @@ private:
/**
* will return NULL if nothing suitable in free list
*/
- DiskLoc _allocFromFreeList(OperationContext* txn, int approxSize, bool capped);
+ DiskLoc _allocFromFreeList(OperationContext* opCtx, int approxSize, bool capped);
/* allocate a new Extent, does not check free list
*/
- DiskLoc _createExtent(OperationContext* txn, int approxSize, bool enforceQuota);
+ DiskLoc _createExtent(OperationContext* opCtx, int approxSize, bool enforceQuota);
- DataFile* _addAFile(OperationContext* txn, int sizeNeeded, bool preallocateNextFile);
+ DataFile* _addAFile(OperationContext* opCtx, int sizeNeeded, bool preallocateNextFile);
/**
@@ -184,14 +184,14 @@ private:
DiskLoc _getFreeListStart() const;
DiskLoc _getFreeListEnd() const;
- void _setFreeListStart(OperationContext* txn, DiskLoc loc);
- void _setFreeListEnd(OperationContext* txn, DiskLoc loc);
+ void _setFreeListStart(OperationContext* opCtx, DiskLoc loc);
+ void _setFreeListEnd(OperationContext* opCtx, DiskLoc loc);
const DataFile* _getOpenFile(int fileId) const;
DataFile* _getOpenFile(int fileId);
DiskLoc _createExtentInFile(
- OperationContext* txn, int fileNo, DataFile* f, int size, bool enforceQuota);
+ OperationContext* opCtx, int fileNo, DataFile* f, int size, bool enforceQuota);
boost::filesystem::path _fileName(int n) const;
@@ -219,7 +219,7 @@ private:
/**
* Must be called before destruction.
*/
- void close(OperationContext* txn);
+ void close(OperationContext* opCtx);
/**
* Returns file at location 'n' in the array, with 'n' less than number of files added.
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
index 29c7e0e92c7..ce670175fbd 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
@@ -50,25 +50,25 @@ public:
MyHarnessHelper() {}
virtual std::unique_ptr<RecordStore> newNonCappedRecordStore() {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
auto md = stdx::make_unique<DummyRecordStoreV1MetaData>(false, 0);
- md->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
- return stdx::make_unique<SimpleRecordStoreV1>(&txn, "a.b", md.release(), &_em, false);
+ md->setUserFlag(&opCtx, CollectionOptions::Flag_NoPadding);
+ return stdx::make_unique<SimpleRecordStoreV1>(&opCtx, "a.b", md.release(), &_em, false);
}
std::unique_ptr<RecordStore> newCappedRecordStore(int64_t cappedMaxSize,
int64_t cappedMaxDocs) final {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
auto md = stdx::make_unique<DummyRecordStoreV1MetaData>(true, 0);
auto md_ptr = md.get();
- std::unique_ptr<RecordStore> rs =
- stdx::make_unique<CappedRecordStoreV1>(&txn, nullptr, "a.b", md.release(), &_em, false);
+ std::unique_ptr<RecordStore> rs = stdx::make_unique<CappedRecordStoreV1>(
+ &opCtx, nullptr, "a.b", md.release(), &_em, false);
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &_em, md_ptr);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid());
+ initializeV1RS(&opCtx, records, drecs, NULL, &_em, md_ptr);
return rs;
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
index d8e8d61e624..e34bee74ade 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
@@ -148,8 +148,8 @@ static void* getNextMemoryMappedFileLocation(unsigned long long mmfSize) {
return reinterpret_cast<void*>(static_cast<uintptr_t>(thisMemoryMappedFileLocation));
}
-void MemoryMappedFile::close(OperationContext* txn) {
- LockMongoFilesShared::assertExclusivelyLocked(txn);
+void MemoryMappedFile::close(OperationContext* opCtx) {
+ LockMongoFilesShared::assertExclusivelyLocked(opCtx);
// Prevent flush and close from concurrently running
stdx::lock_guard<stdx::mutex> lk(_flushMutex);
@@ -174,18 +174,18 @@ void MemoryMappedFile::close(OperationContext* txn) {
fd = 0;
}
- destroyed(txn); // cleans up from the master list of mmaps
+ destroyed(opCtx); // cleans up from the master list of mmaps
}
bool MemoryMappedFile::isClosed() {
return !len && !fd && !views.size();
}
-void* MemoryMappedFile::map(OperationContext* txn,
+void* MemoryMappedFile::map(OperationContext* opCtx,
const char* filenameIn,
unsigned long long& length) {
verify(fd == 0 && len == 0); // can't open more than once
- setFilename(txn, filenameIn);
+ setFilename(opCtx, filenameIn);
FileAllocator::get()->allocateAsap(filenameIn, length);
/* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary
* perhaps. */
@@ -244,8 +244,8 @@ void* MemoryMappedFile::map(OperationContext* txn,
severe() << "CreateFileMappingW for " << filename << " failed with "
<< errnoWithDescription(dosError) << " (file size is " << length << ")"
<< " in MemoryMappedFile::map" << endl;
- LockMongoFilesExclusive lock(txn);
- close(txn);
+ LockMongoFilesExclusive lock(opCtx);
+ close(opCtx);
fassertFailed(16225);
}
}
@@ -296,8 +296,8 @@ void* MemoryMappedFile::map(OperationContext* txn,
<< length << ")"
<< " in MemoryMappedFile::map" << endl;
- LockMongoFilesExclusive lock(txn);
- close(txn);
+ LockMongoFilesExclusive lock(opCtx);
+ close(opCtx);
fassertFailed(16166);
}
@@ -359,8 +359,8 @@ void* MemoryMappedFile::createPrivateMap() {
return privateMapAddress;
}
-void* MemoryMappedFile::remapPrivateView(OperationContext* txn, void* oldPrivateAddr) {
- LockMongoFilesExclusive lockMongoFiles(txn);
+void* MemoryMappedFile::remapPrivateView(OperationContext* opCtx, void* oldPrivateAddr) {
+ LockMongoFilesExclusive lockMongoFiles(opCtx);
privateViews.clearWritableBits(oldPrivateAddr, len);
@@ -406,12 +406,12 @@ public:
_filename(filename),
_flushMutex(flushMutex) {}
- void flush(OperationContext* txn) {
+ void flush(OperationContext* opCtx) {
if (!_view || !_fd)
return;
{
- LockMongoFilesShared mmfilesLock(txn);
+ LockMongoFilesShared mmfilesLock(opCtx);
std::set<MongoFile*> mmfs = MongoFile::getAllFiles();
std::set<MongoFile*>::const_iterator it = mmfs.find(_theFile);
@@ -475,9 +475,9 @@ void MemoryMappedFile::flush(bool sync) {
uassert(13056, "Async flushing not supported on windows", sync);
if (!views.empty()) {
WindowsFlushable f(this, viewForFlushing(), fd, _uniqueId, filename(), _flushMutex);
- auto txn = cc().getOperationContext();
- invariant(txn);
- f.flush(txn);
+ auto opCtx = cc().getOperationContext();
+ invariant(opCtx);
+ f.flush(opCtx);
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index d67fa341cb4..2c49cf8b5c8 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -139,7 +139,7 @@ RecordStoreV1Base::RecordStoreV1Base(StringData ns,
RecordStoreV1Base::~RecordStoreV1Base() {}
-int64_t RecordStoreV1Base::storageSize(OperationContext* txn,
+int64_t RecordStoreV1Base::storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo,
int level) const {
BSONArrayBuilder extentInfo;
@@ -147,7 +147,7 @@ int64_t RecordStoreV1Base::storageSize(OperationContext* txn,
int64_t total = 0;
int n = 0;
- DiskLoc cur = _details->firstExtent(txn);
+ DiskLoc cur = _details->firstExtent(opCtx);
while (!cur.isNull()) {
Extent* e = _extentManager->getExtent(cur);
@@ -170,11 +170,11 @@ int64_t RecordStoreV1Base::storageSize(OperationContext* txn,
return total;
}
-RecordData RecordStoreV1Base::dataFor(OperationContext* txn, const RecordId& loc) const {
+RecordData RecordStoreV1Base::dataFor(OperationContext* opCtx, const RecordId& loc) const {
return recordFor(DiskLoc::fromRecordId(loc))->toRecordData();
}
-bool RecordStoreV1Base::findRecord(OperationContext* txn,
+bool RecordStoreV1Base::findRecord(OperationContext* opCtx,
const RecordId& loc,
RecordData* rd) const {
// this is a bit odd, as the semantics of using the storage engine imply it _has_ to be.
@@ -202,28 +202,29 @@ DeletedRecord* RecordStoreV1Base::drec(const DiskLoc& loc) const {
return reinterpret_cast<DeletedRecord*>(recordFor(loc));
}
-Extent* RecordStoreV1Base::_getExtent(OperationContext* txn, const DiskLoc& loc) const {
+Extent* RecordStoreV1Base::_getExtent(OperationContext* opCtx, const DiskLoc& loc) const {
return _extentManager->getExtent(loc);
}
-DiskLoc RecordStoreV1Base::_getExtentLocForRecord(OperationContext* txn, const DiskLoc& loc) const {
+DiskLoc RecordStoreV1Base::_getExtentLocForRecord(OperationContext* opCtx,
+ const DiskLoc& loc) const {
return _extentManager->extentLocForV1(loc);
}
-DiskLoc RecordStoreV1Base::getNextRecord(OperationContext* txn, const DiskLoc& loc) const {
- DiskLoc next = getNextRecordInExtent(txn, loc);
+DiskLoc RecordStoreV1Base::getNextRecord(OperationContext* opCtx, const DiskLoc& loc) const {
+ DiskLoc next = getNextRecordInExtent(opCtx, loc);
if (!next.isNull()) {
return next;
}
// now traverse extents
- Extent* e = _getExtent(txn, _getExtentLocForRecord(txn, loc));
+ Extent* e = _getExtent(opCtx, _getExtentLocForRecord(opCtx, loc));
while (1) {
if (e->xnext.isNull())
return DiskLoc(); // end of collection
- e = _getExtent(txn, e->xnext);
+ e = _getExtent(opCtx, e->xnext);
if (!e->firstRecord.isNull())
break;
// entire extent could be empty, keep looking
@@ -231,19 +232,19 @@ DiskLoc RecordStoreV1Base::getNextRecord(OperationContext* txn, const DiskLoc& l
return e->firstRecord;
}
-DiskLoc RecordStoreV1Base::getPrevRecord(OperationContext* txn, const DiskLoc& loc) const {
- DiskLoc prev = getPrevRecordInExtent(txn, loc);
+DiskLoc RecordStoreV1Base::getPrevRecord(OperationContext* opCtx, const DiskLoc& loc) const {
+ DiskLoc prev = getPrevRecordInExtent(opCtx, loc);
if (!prev.isNull()) {
return prev;
}
// now traverse extents
- Extent* e = _getExtent(txn, _getExtentLocForRecord(txn, loc));
+ Extent* e = _getExtent(opCtx, _getExtentLocForRecord(opCtx, loc));
while (1) {
if (e->xprev.isNull())
return DiskLoc(); // end of collection
- e = _getExtent(txn, e->xprev);
+ e = _getExtent(opCtx, e->xprev);
if (!e->firstRecord.isNull())
break;
// entire extent could be empty, keep looking
@@ -251,7 +252,7 @@ DiskLoc RecordStoreV1Base::getPrevRecord(OperationContext* txn, const DiskLoc& l
return e->lastRecord;
}
-DiskLoc RecordStoreV1Base::_findFirstSpot(OperationContext* txn,
+DiskLoc RecordStoreV1Base::_findFirstSpot(OperationContext* opCtx,
const DiskLoc& extDiskLoc,
Extent* e) {
DiskLoc emptyLoc = extDiskLoc;
@@ -266,14 +267,15 @@ DiskLoc RecordStoreV1Base::_findFirstSpot(OperationContext* txn,
ofs = newOfs;
}
- DeletedRecord* empty = txn->recoveryUnit()->writing(drec(emptyLoc));
+ DeletedRecord* empty = opCtx->recoveryUnit()->writing(drec(emptyLoc));
empty->lengthWithHeaders() = delRecLength;
empty->extentOfs() = e->myLoc.getOfs();
empty->nextDeleted().Null();
return emptyLoc;
}
-DiskLoc RecordStoreV1Base::getNextRecordInExtent(OperationContext* txn, const DiskLoc& loc) const {
+DiskLoc RecordStoreV1Base::getNextRecordInExtent(OperationContext* opCtx,
+ const DiskLoc& loc) const {
int nextOffset = recordFor(loc)->nextOfs();
if (nextOffset == DiskLoc::NullOfs)
@@ -284,7 +286,8 @@ DiskLoc RecordStoreV1Base::getNextRecordInExtent(OperationContext* txn, const Di
return result;
}
-DiskLoc RecordStoreV1Base::getPrevRecordInExtent(OperationContext* txn, const DiskLoc& loc) const {
+DiskLoc RecordStoreV1Base::getPrevRecordInExtent(OperationContext* opCtx,
+ const DiskLoc& loc) const {
int prevOffset = recordFor(loc)->prevOfs();
if (prevOffset == DiskLoc::NullOfs)
@@ -295,7 +298,7 @@ DiskLoc RecordStoreV1Base::getPrevRecordInExtent(OperationContext* txn, const Di
return result;
}
-Status RecordStoreV1Base::insertRecordsWithDocWriter(OperationContext* txn,
+Status RecordStoreV1Base::insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut) {
@@ -312,19 +315,19 @@ Status RecordStoreV1Base::insertRecordsWithDocWriter(OperationContext* txn,
? quantizeAllocationSpace(lenWHdr)
: lenWHdr;
- StatusWith<DiskLoc> loc = allocRecord(txn, lenToAlloc, /*enforceQuota=*/false);
+ StatusWith<DiskLoc> loc = allocRecord(opCtx, lenToAlloc, /*enforceQuota=*/false);
if (!loc.isOK())
return loc.getStatus();
MmapV1RecordHeader* r = recordFor(loc.getValue());
fassert(17319, r->lengthWithHeaders() >= lenWHdr);
- r = reinterpret_cast<MmapV1RecordHeader*>(txn->recoveryUnit()->writingPtr(r, lenWHdr));
+ r = reinterpret_cast<MmapV1RecordHeader*>(opCtx->recoveryUnit()->writingPtr(r, lenWHdr));
docs[i]->writeDocument(r->data());
- _addRecordToRecListInExtent(txn, r, loc.getValue());
+ _addRecordToRecListInExtent(opCtx, r, loc.getValue());
- _details->incrementStats(txn, r->netLength(), 1);
+ _details->incrementStats(opCtx, r->netLength(), 1);
if (idsOut)
idsOut[i] = loc.getValue().toRecordId();
@@ -335,7 +338,7 @@ Status RecordStoreV1Base::insertRecordsWithDocWriter(OperationContext* txn,
}
-StatusWith<RecordId> RecordStoreV1Base::insertRecord(OperationContext* txn,
+StatusWith<RecordId> RecordStoreV1Base::insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) {
@@ -347,10 +350,10 @@ StatusWith<RecordId> RecordStoreV1Base::insertRecord(OperationContext* txn,
return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be <= 16.5MB");
}
- return _insertRecord(txn, data, len, enforceQuota);
+ return _insertRecord(opCtx, data, len, enforceQuota);
}
-StatusWith<RecordId> RecordStoreV1Base::_insertRecord(OperationContext* txn,
+StatusWith<RecordId> RecordStoreV1Base::_insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) {
@@ -358,7 +361,7 @@ StatusWith<RecordId> RecordStoreV1Base::_insertRecord(OperationContext* txn,
const int lenToAlloc = shouldPadInserts() ? quantizeAllocationSpace(lenWHdr) : lenWHdr;
fassert(17208, lenToAlloc >= lenWHdr);
- StatusWith<DiskLoc> loc = allocRecord(txn, lenToAlloc, enforceQuota);
+ StatusWith<DiskLoc> loc = allocRecord(opCtx, lenToAlloc, enforceQuota);
if (!loc.isOK())
return StatusWith<RecordId>(loc.getStatus());
@@ -366,17 +369,17 @@ StatusWith<RecordId> RecordStoreV1Base::_insertRecord(OperationContext* txn,
fassert(17210, r->lengthWithHeaders() >= lenWHdr);
// copy the data
- r = reinterpret_cast<MmapV1RecordHeader*>(txn->recoveryUnit()->writingPtr(r, lenWHdr));
+ r = reinterpret_cast<MmapV1RecordHeader*>(opCtx->recoveryUnit()->writingPtr(r, lenWHdr));
memcpy(r->data(), data, len);
- _addRecordToRecListInExtent(txn, r, loc.getValue());
+ _addRecordToRecListInExtent(opCtx, r, loc.getValue());
- _details->incrementStats(txn, r->netLength(), 1);
+ _details->incrementStats(opCtx, r->netLength(), 1);
return StatusWith<RecordId>(loc.getValue().toRecordId());
}
-Status RecordStoreV1Base::updateRecord(OperationContext* txn,
+Status RecordStoreV1Base::updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int dataSize,
@@ -386,13 +389,13 @@ Status RecordStoreV1Base::updateRecord(OperationContext* txn,
if (oldRecord->netLength() >= dataSize) {
// Make sure to notify other queries before we do an in-place update.
if (notifier) {
- Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(txn, oldLocation);
+ Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(opCtx, oldLocation);
if (!callbackStatus.isOK())
return callbackStatus;
}
// we fit
- memcpy(txn->recoveryUnit()->writingPtr(oldRecord->data(), dataSize), data, dataSize);
+ memcpy(opCtx->recoveryUnit()->writingPtr(oldRecord->data(), dataSize), data, dataSize);
return Status::OK();
}
@@ -407,7 +410,7 @@ bool RecordStoreV1Base::updateWithDamagesSupported() const {
}
StatusWith<RecordData> RecordStoreV1Base::updateWithDamages(
- OperationContext* txn,
+ OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
@@ -420,14 +423,15 @@ StatusWith<RecordData> RecordStoreV1Base::updateWithDamages(
const mutablebson::DamageVector::const_iterator end = damages.end();
for (; where != end; ++where) {
const char* sourcePtr = damageSource + where->sourceOffset;
- void* targetPtr = txn->recoveryUnit()->writingPtr(root + where->targetOffset, where->size);
+ void* targetPtr =
+ opCtx->recoveryUnit()->writingPtr(root + where->targetOffset, where->size);
std::memcpy(targetPtr, sourcePtr, where->size);
}
return rec->toRecordData();
}
-void RecordStoreV1Base::deleteRecord(OperationContext* txn, const RecordId& rid) {
+void RecordStoreV1Base::deleteRecord(OperationContext* opCtx, const RecordId& rid) {
const DiskLoc dl = DiskLoc::fromRecordId(rid);
MmapV1RecordHeader* todelete = recordFor(dl);
@@ -436,31 +440,31 @@ void RecordStoreV1Base::deleteRecord(OperationContext* txn, const RecordId& rid)
/* remove ourself from the record next/prev chain */
{
if (todelete->prevOfs() != DiskLoc::NullOfs) {
- DiskLoc prev = getPrevRecordInExtent(txn, dl);
+ DiskLoc prev = getPrevRecordInExtent(opCtx, dl);
MmapV1RecordHeader* prevRecord = recordFor(prev);
- txn->recoveryUnit()->writingInt(prevRecord->nextOfs()) = todelete->nextOfs();
+ opCtx->recoveryUnit()->writingInt(prevRecord->nextOfs()) = todelete->nextOfs();
}
if (todelete->nextOfs() != DiskLoc::NullOfs) {
- DiskLoc next = getNextRecord(txn, dl);
+ DiskLoc next = getNextRecord(opCtx, dl);
MmapV1RecordHeader* nextRecord = recordFor(next);
- txn->recoveryUnit()->writingInt(nextRecord->prevOfs()) = todelete->prevOfs();
+ opCtx->recoveryUnit()->writingInt(nextRecord->prevOfs()) = todelete->prevOfs();
}
}
/* remove ourself from extent pointers */
{
DiskLoc extentLoc = todelete->myExtentLoc(dl);
- Extent* e = _getExtent(txn, extentLoc);
+ Extent* e = _getExtent(opCtx, extentLoc);
if (e->firstRecord == dl) {
- txn->recoveryUnit()->writing(&e->firstRecord);
+ opCtx->recoveryUnit()->writing(&e->firstRecord);
if (todelete->nextOfs() == DiskLoc::NullOfs)
e->firstRecord.Null();
else
e->firstRecord.set(dl.a(), todelete->nextOfs());
}
if (e->lastRecord == dl) {
- txn->recoveryUnit()->writing(&e->lastRecord);
+ opCtx->recoveryUnit()->writing(&e->lastRecord);
if (todelete->prevOfs() == DiskLoc::NullOfs)
e->lastRecord.Null();
else
@@ -470,7 +474,7 @@ void RecordStoreV1Base::deleteRecord(OperationContext* txn, const RecordId& rid)
/* add to the free list */
{
- _details->incrementStats(txn, -1 * todelete->netLength(), -1);
+ _details->incrementStats(opCtx, -1 * todelete->netLength(), -1);
if (_isSystemIndexes) {
/* temp: if in system.indexes, don't reuse, and zero out: we want to be
@@ -478,76 +482,76 @@ void RecordStoreV1Base::deleteRecord(OperationContext* txn, const RecordId& rid)
to this disk location. so an incorrectly done remove would cause
a lot of problems.
*/
- memset(txn->recoveryUnit()->writingPtr(todelete, todelete->lengthWithHeaders()),
+ memset(opCtx->recoveryUnit()->writingPtr(todelete, todelete->lengthWithHeaders()),
0,
todelete->lengthWithHeaders());
} else {
// this is defensive so we can detect if we are still using a location
// that was deleted
- memset(txn->recoveryUnit()->writingPtr(todelete->data(), 4), 0xee, 4);
- addDeletedRec(txn, dl);
+ memset(opCtx->recoveryUnit()->writingPtr(todelete->data(), 4), 0xee, 4);
+ addDeletedRec(opCtx, dl);
}
}
}
-std::unique_ptr<RecordCursor> RecordStoreV1Base::getCursorForRepair(OperationContext* txn) const {
- return stdx::make_unique<RecordStoreV1RepairCursor>(txn, this);
+std::unique_ptr<RecordCursor> RecordStoreV1Base::getCursorForRepair(OperationContext* opCtx) const {
+ return stdx::make_unique<RecordStoreV1RepairCursor>(opCtx, this);
}
-void RecordStoreV1Base::_addRecordToRecListInExtent(OperationContext* txn,
+void RecordStoreV1Base::_addRecordToRecListInExtent(OperationContext* opCtx,
MmapV1RecordHeader* r,
DiskLoc loc) {
dassert(recordFor(loc) == r);
- DiskLoc extentLoc = _getExtentLocForRecord(txn, loc);
- Extent* e = _getExtent(txn, extentLoc);
+ DiskLoc extentLoc = _getExtentLocForRecord(opCtx, loc);
+ Extent* e = _getExtent(opCtx, extentLoc);
if (e->lastRecord.isNull()) {
- *txn->recoveryUnit()->writing(&e->firstRecord) = loc;
- *txn->recoveryUnit()->writing(&e->lastRecord) = loc;
+ *opCtx->recoveryUnit()->writing(&e->firstRecord) = loc;
+ *opCtx->recoveryUnit()->writing(&e->lastRecord) = loc;
r->prevOfs() = r->nextOfs() = DiskLoc::NullOfs;
} else {
MmapV1RecordHeader* oldlast = recordFor(e->lastRecord);
r->prevOfs() = e->lastRecord.getOfs();
r->nextOfs() = DiskLoc::NullOfs;
- txn->recoveryUnit()->writingInt(oldlast->nextOfs()) = loc.getOfs();
- *txn->recoveryUnit()->writing(&e->lastRecord) = loc;
+ opCtx->recoveryUnit()->writingInt(oldlast->nextOfs()) = loc.getOfs();
+ *opCtx->recoveryUnit()->writing(&e->lastRecord) = loc;
}
}
-void RecordStoreV1Base::increaseStorageSize(OperationContext* txn, int size, bool enforceQuota) {
- DiskLoc eloc = _extentManager->allocateExtent(txn, isCapped(), size, enforceQuota);
+void RecordStoreV1Base::increaseStorageSize(OperationContext* opCtx, int size, bool enforceQuota) {
+ DiskLoc eloc = _extentManager->allocateExtent(opCtx, isCapped(), size, enforceQuota);
Extent* e = _extentManager->getExtent(eloc);
invariant(e);
- *txn->recoveryUnit()->writing(&e->nsDiagnostic) = _ns;
+ *opCtx->recoveryUnit()->writing(&e->nsDiagnostic) = _ns;
- txn->recoveryUnit()->writing(&e->xnext)->Null();
- txn->recoveryUnit()->writing(&e->xprev)->Null();
- txn->recoveryUnit()->writing(&e->firstRecord)->Null();
- txn->recoveryUnit()->writing(&e->lastRecord)->Null();
+ opCtx->recoveryUnit()->writing(&e->xnext)->Null();
+ opCtx->recoveryUnit()->writing(&e->xprev)->Null();
+ opCtx->recoveryUnit()->writing(&e->firstRecord)->Null();
+ opCtx->recoveryUnit()->writing(&e->lastRecord)->Null();
- DiskLoc emptyLoc = _findFirstSpot(txn, eloc, e);
+ DiskLoc emptyLoc = _findFirstSpot(opCtx, eloc, e);
- if (_details->lastExtent(txn).isNull()) {
- invariant(_details->firstExtent(txn).isNull());
- _details->setFirstExtent(txn, eloc);
- _details->setLastExtent(txn, eloc);
- _details->setCapExtent(txn, eloc);
+ if (_details->lastExtent(opCtx).isNull()) {
+ invariant(_details->firstExtent(opCtx).isNull());
+ _details->setFirstExtent(opCtx, eloc);
+ _details->setLastExtent(opCtx, eloc);
+ _details->setCapExtent(opCtx, eloc);
invariant(e->xprev.isNull());
invariant(e->xnext.isNull());
} else {
- invariant(!_details->firstExtent(txn).isNull());
- *txn->recoveryUnit()->writing(&e->xprev) = _details->lastExtent(txn);
- *txn->recoveryUnit()->writing(
- &_extentManager->getExtent(_details->lastExtent(txn))->xnext) = eloc;
- _details->setLastExtent(txn, eloc);
+ invariant(!_details->firstExtent(opCtx).isNull());
+ *opCtx->recoveryUnit()->writing(&e->xprev) = _details->lastExtent(opCtx);
+ *opCtx->recoveryUnit()->writing(
+ &_extentManager->getExtent(_details->lastExtent(opCtx))->xnext) = eloc;
+ _details->setLastExtent(opCtx, eloc);
}
- _details->setLastExtentSize(txn, e->length);
+ _details->setLastExtentSize(opCtx, e->length);
- addDeletedRec(txn, emptyLoc);
+ addDeletedRec(opCtx, emptyLoc);
}
-Status RecordStoreV1Base::validate(OperationContext* txn,
+Status RecordStoreV1Base::validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
@@ -568,22 +572,22 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
output->appendNumber("datasize", _details->dataSize());
output->appendNumber("nrecords", _details->numRecords());
- output->appendNumber("lastExtentSize", _details->lastExtentSize(txn));
+ output->appendNumber("lastExtentSize", _details->lastExtentSize(opCtx));
- if (_details->firstExtent(txn).isNull())
+ if (_details->firstExtent(opCtx).isNull())
output->append("firstExtent", "null");
else
- output->append(
- "firstExtent",
- str::stream() << _details->firstExtent(txn).toString() << " ns:"
- << _getExtent(txn, _details->firstExtent(txn))->nsDiagnostic.toString());
- if (_details->lastExtent(txn).isNull())
+ output->append("firstExtent",
+ str::stream() << _details->firstExtent(opCtx).toString() << " ns:"
+ << _getExtent(opCtx, _details->firstExtent(opCtx))
+ ->nsDiagnostic.toString());
+ if (_details->lastExtent(opCtx).isNull())
output->append("lastExtent", "null");
else
- output->append(
- "lastExtent",
- str::stream() << _details->lastExtent(txn).toString() << " ns:"
- << _getExtent(txn, _details->lastExtent(txn))->nsDiagnostic.toString());
+ output->append("lastExtent",
+ str::stream() << _details->lastExtent(opCtx).toString() << " ns:"
+ << _getExtent(opCtx, _details->lastExtent(opCtx))
+ ->nsDiagnostic.toString());
// 22222222222222222222222222
{ // validate extent basics
@@ -591,14 +595,14 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
int extentCount = 0;
DiskLoc extentDiskLoc;
try {
- if (!_details->firstExtent(txn).isNull()) {
- _getExtent(txn, _details->firstExtent(txn))->assertOk();
- _getExtent(txn, _details->lastExtent(txn))->assertOk();
+ if (!_details->firstExtent(opCtx).isNull()) {
+ _getExtent(opCtx, _details->firstExtent(opCtx))->assertOk();
+ _getExtent(opCtx, _details->lastExtent(opCtx))->assertOk();
}
- extentDiskLoc = _details->firstExtent(txn);
+ extentDiskLoc = _details->firstExtent(opCtx);
while (!extentDiskLoc.isNull()) {
- Extent* thisExtent = _getExtent(txn, extentDiskLoc);
+ Extent* thisExtent = _getExtent(opCtx, extentDiskLoc);
if (level == kValidateFull) {
extentData << thisExtent->dump();
}
@@ -608,24 +612,24 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
DiskLoc nextDiskLoc = thisExtent->xnext;
if (extentCount > 0 && !nextDiskLoc.isNull() &&
- _getExtent(txn, nextDiskLoc)->xprev != extentDiskLoc) {
+ _getExtent(opCtx, nextDiskLoc)->xprev != extentDiskLoc) {
StringBuilder sb;
- sb << "'xprev' pointer " << _getExtent(txn, nextDiskLoc)->xprev.toString()
+ sb << "'xprev' pointer " << _getExtent(opCtx, nextDiskLoc)->xprev.toString()
<< " in extent " << nextDiskLoc.toString() << " does not point to extent "
<< extentDiskLoc.toString();
results->errors.push_back(sb.str());
results->valid = false;
}
- if (nextDiskLoc.isNull() && extentDiskLoc != _details->lastExtent(txn)) {
+ if (nextDiskLoc.isNull() && extentDiskLoc != _details->lastExtent(opCtx)) {
StringBuilder sb;
- sb << "'lastExtent' pointer " << _details->lastExtent(txn).toString()
+ sb << "'lastExtent' pointer " << _details->lastExtent(opCtx).toString()
<< " does not point to last extent in list " << extentDiskLoc.toString();
results->errors.push_back(sb.str());
results->valid = false;
}
extentDiskLoc = nextDiskLoc;
extentCount++;
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
} catch (const DBException& e) {
StringBuilder sb;
@@ -644,31 +648,31 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
// 333333333333333333333333333
bool testingLastExtent = false;
try {
- DiskLoc firstExtentLoc = _details->firstExtent(txn);
+ DiskLoc firstExtentLoc = _details->firstExtent(opCtx);
if (firstExtentLoc.isNull()) {
// this is ok
} else {
- output->append("firstExtentDetails", _getExtent(txn, firstExtentLoc)->dump());
- if (!_getExtent(txn, firstExtentLoc)->xprev.isNull()) {
+ output->append("firstExtentDetails", _getExtent(opCtx, firstExtentLoc)->dump());
+ if (!_getExtent(opCtx, firstExtentLoc)->xprev.isNull()) {
StringBuilder sb;
sb << "'xprev' pointer in 'firstExtent' "
- << _details->firstExtent(txn).toString() << " is "
- << _getExtent(txn, firstExtentLoc)->xprev.toString() << ", should be null";
+ << _details->firstExtent(opCtx).toString() << " is "
+ << _getExtent(opCtx, firstExtentLoc)->xprev.toString() << ", should be null";
results->errors.push_back(sb.str());
results->valid = false;
}
}
testingLastExtent = true;
- DiskLoc lastExtentLoc = _details->lastExtent(txn);
+ DiskLoc lastExtentLoc = _details->lastExtent(opCtx);
if (lastExtentLoc.isNull()) {
// this is ok
} else {
if (firstExtentLoc != lastExtentLoc) {
- output->append("lastExtentDetails", _getExtent(txn, lastExtentLoc)->dump());
- if (!_getExtent(txn, lastExtentLoc)->xnext.isNull()) {
+ output->append("lastExtentDetails", _getExtent(opCtx, lastExtentLoc)->dump());
+ if (!_getExtent(opCtx, lastExtentLoc)->xnext.isNull()) {
StringBuilder sb;
sb << "'xnext' pointer in 'lastExtent' " << lastExtentLoc.toString()
- << " is " << _getExtent(txn, lastExtentLoc)->xnext.toString()
+ << " is " << _getExtent(opCtx, lastExtentLoc)->xnext.toString()
<< ", should be null";
results->errors.push_back(sb.str());
results->valid = false;
@@ -696,7 +700,7 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
int outOfOrder = 0;
DiskLoc dl_last;
- auto cursor = getCursor(txn);
+ auto cursor = getCursor(opCtx);
while (auto record = cursor->next()) {
const auto dl = DiskLoc::fromRecordId(record->id);
n++;
@@ -800,7 +804,7 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
delSize += d->lengthWithHeaders();
loc = d->nextDeleted();
k++;
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
delBucketSizes << k;
} catch (...) {
@@ -829,10 +833,10 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
return Status::OK();
}
-void RecordStoreV1Base::appendCustomStats(OperationContext* txn,
+void RecordStoreV1Base::appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const {
- result->append("lastExtentSize", _details->lastExtentSize(txn) / scale);
+ result->append("lastExtentSize", _details->lastExtentSize(opCtx) / scale);
result->append("paddingFactor", 1.0); // hard coded
result->append("paddingFactorNote",
"paddingFactor is unused and unmaintained in 3.0. It "
@@ -841,7 +845,8 @@ void RecordStoreV1Base::appendCustomStats(OperationContext* txn,
result->appendBool("capped", isCapped());
if (isCapped()) {
result->appendNumber("max", _details->maxCappedDocs());
- result->appendNumber("maxSize", static_cast<long long>(storageSize(txn, NULL, 0) / scale));
+ result->appendNumber("maxSize",
+ static_cast<long long>(storageSize(opCtx, NULL, 0) / scale));
}
}
@@ -853,13 +858,13 @@ struct touch_location {
};
}
-Status RecordStoreV1Base::touch(OperationContext* txn, BSONObjBuilder* output) const {
+Status RecordStoreV1Base::touch(OperationContext* opCtx, BSONObjBuilder* output) const {
Timer t;
std::vector<touch_location> ranges;
{
- DiskLoc nextLoc = _details->firstExtent(txn);
- Extent* ext = nextLoc.isNull() ? NULL : _getExtent(txn, nextLoc);
+ DiskLoc nextLoc = _details->firstExtent(opCtx);
+ Extent* ext = nextLoc.isNull() ? NULL : _getExtent(opCtx, nextLoc);
while (ext) {
touch_location tl;
tl.root = reinterpret_cast<const char*>(ext);
@@ -870,20 +875,20 @@ Status RecordStoreV1Base::touch(OperationContext* txn, BSONObjBuilder* output) c
if (nextLoc.isNull())
ext = NULL;
else
- ext = _getExtent(txn, nextLoc);
+ ext = _getExtent(opCtx, nextLoc);
}
}
std::string progress_msg = "touch " + ns() + " extents";
- stdx::unique_lock<Client> lk(*txn->getClient());
+ stdx::unique_lock<Client> lk(*opCtx->getClient());
ProgressMeterHolder pm(
- *txn->setMessage_inlock(progress_msg.c_str(), "Touch Progress", ranges.size()));
+ *opCtx->setMessage_inlock(progress_msg.c_str(), "Touch Progress", ranges.size()));
lk.unlock();
for (std::vector<touch_location>::iterator it = ranges.begin(); it != ranges.end(); ++it) {
touch_pages(it->root, it->length);
pm.hit();
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
pm.finished();
@@ -900,7 +905,7 @@ boost::optional<Record> RecordStoreV1Base::IntraExtentIterator::next() {
return {};
auto out = _curr.toRecordId();
advance();
- return {{out, _rs->dataFor(_txn, out)}};
+ return {{out, _rs->dataFor(_opCtx, out)}};
}
void RecordStoreV1Base::IntraExtentIterator::advance() {
@@ -912,13 +917,13 @@ void RecordStoreV1Base::IntraExtentIterator::advance() {
_curr = (nextOfs == DiskLoc::NullOfs ? DiskLoc() : DiskLoc(_curr.a(), nextOfs));
}
-void RecordStoreV1Base::IntraExtentIterator::invalidate(OperationContext* txn,
+void RecordStoreV1Base::IntraExtentIterator::invalidate(OperationContext* opCtx,
const RecordId& rid) {
if (rid == _curr.toRecordId()) {
const DiskLoc origLoc = _curr;
// Undo the advance on rollback, as the deletion that forced it "never happened".
- txn->recoveryUnit()->onRollback([this, origLoc]() { this->_curr = origLoc; });
+ opCtx->recoveryUnit()->onRollback([this, origLoc]() { this->_curr = origLoc; });
advance();
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
index ae7f5dd656e..6dadf5487da 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
@@ -50,10 +50,10 @@ public:
virtual ~RecordStoreV1MetaData() {}
virtual const DiskLoc& capExtent() const = 0;
- virtual void setCapExtent(OperationContext* txn, const DiskLoc& loc) = 0;
+ virtual void setCapExtent(OperationContext* opCtx, const DiskLoc& loc) = 0;
virtual const DiskLoc& capFirstNewRecord() const = 0;
- virtual void setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc) = 0;
+ virtual void setCapFirstNewRecord(OperationContext* opCtx, const DiskLoc& loc) = 0;
bool capLooped() const {
return capFirstNewRecord().isValid();
@@ -62,36 +62,36 @@ public:
virtual long long dataSize() const = 0;
virtual long long numRecords() const = 0;
- virtual void incrementStats(OperationContext* txn,
+ virtual void incrementStats(OperationContext* opCtx,
long long dataSizeIncrement,
long long numRecordsIncrement) = 0;
- virtual void setStats(OperationContext* txn, long long dataSize, long long numRecords) = 0;
+ virtual void setStats(OperationContext* opCtx, long long dataSize, long long numRecords) = 0;
virtual DiskLoc deletedListEntry(int bucket) const = 0;
- virtual void setDeletedListEntry(OperationContext* txn, int bucket, const DiskLoc& loc) = 0;
+ virtual void setDeletedListEntry(OperationContext* opCtx, int bucket, const DiskLoc& loc) = 0;
virtual DiskLoc deletedListLegacyGrabBag() const = 0;
- virtual void setDeletedListLegacyGrabBag(OperationContext* txn, const DiskLoc& loc) = 0;
+ virtual void setDeletedListLegacyGrabBag(OperationContext* opCtx, const DiskLoc& loc) = 0;
- virtual void orphanDeletedList(OperationContext* txn) = 0;
+ virtual void orphanDeletedList(OperationContext* opCtx) = 0;
- virtual const DiskLoc& firstExtent(OperationContext* txn) const = 0;
- virtual void setFirstExtent(OperationContext* txn, const DiskLoc& loc) = 0;
+ virtual const DiskLoc& firstExtent(OperationContext* opCtx) const = 0;
+ virtual void setFirstExtent(OperationContext* opCtx, const DiskLoc& loc) = 0;
- virtual const DiskLoc& lastExtent(OperationContext* txn) const = 0;
- virtual void setLastExtent(OperationContext* txn, const DiskLoc& loc) = 0;
+ virtual const DiskLoc& lastExtent(OperationContext* opCtx) const = 0;
+ virtual void setLastExtent(OperationContext* opCtx, const DiskLoc& loc) = 0;
virtual bool isCapped() const = 0;
virtual bool isUserFlagSet(int flag) const = 0;
virtual int userFlags() const = 0;
- virtual bool setUserFlag(OperationContext* txn, int flag) = 0;
- virtual bool clearUserFlag(OperationContext* txn, int flag) = 0;
- virtual bool replaceUserFlags(OperationContext* txn, int flags) = 0;
+ virtual bool setUserFlag(OperationContext* opCtx, int flag) = 0;
+ virtual bool clearUserFlag(OperationContext* opCtx, int flag) = 0;
+ virtual bool replaceUserFlags(OperationContext* opCtx, int flags) = 0;
- virtual int lastExtentSize(OperationContext* txn) const = 0;
- virtual void setLastExtentSize(OperationContext* txn, int newMax) = 0;
+ virtual int lastExtentSize(OperationContext* opCtx) const = 0;
+ virtual void setLastExtentSize(OperationContext* opCtx, int newMax) = 0;
virtual long long maxCappedDocs() const = 0;
};
@@ -172,34 +172,34 @@ public:
virtual ~RecordStoreV1Base();
- virtual long long dataSize(OperationContext* txn) const {
+ virtual long long dataSize(OperationContext* opCtx) const {
return _details->dataSize();
}
- virtual long long numRecords(OperationContext* txn) const {
+ virtual long long numRecords(OperationContext* opCtx) const {
return _details->numRecords();
}
- virtual int64_t storageSize(OperationContext* txn,
+ virtual int64_t storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo = NULL,
int level = 0) const;
- virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const;
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const;
- virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* rd) const;
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const;
- void deleteRecord(OperationContext* txn, const RecordId& dl);
+ void deleteRecord(OperationContext* opCtx, const RecordId& dl);
- StatusWith<RecordId> insertRecord(OperationContext* txn,
+ StatusWith<RecordId> insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota);
- Status insertRecordsWithDocWriter(OperationContext* txn,
+ Status insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut) final;
- virtual Status updateRecord(OperationContext* txn,
+ virtual Status updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int len,
@@ -208,27 +208,27 @@ public:
virtual bool updateWithDamagesSupported() const;
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn,
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages);
- virtual std::unique_ptr<RecordCursor> getCursorForRepair(OperationContext* txn) const;
+ virtual std::unique_ptr<RecordCursor> getCursorForRepair(OperationContext* opCtx) const;
- void increaseStorageSize(OperationContext* txn, int size, bool enforceQuota);
+ void increaseStorageSize(OperationContext* opCtx, int size, bool enforceQuota);
- virtual Status validate(OperationContext* txn,
+ virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
BSONObjBuilder* output);
- virtual void appendCustomStats(OperationContext* txn,
+ virtual void appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const;
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
+ virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const;
const RecordStoreV1MetaData* details() const {
return _details.get();
@@ -237,13 +237,13 @@ public:
// This keeps track of cursors saved during yielding, for invalidation purposes.
SavedCursorRegistry savedCursors;
- DiskLoc getExtentLocForRecord(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc getExtentLocForRecord(OperationContext* opCtx, const DiskLoc& loc) const;
- DiskLoc getNextRecord(OperationContext* txn, const DiskLoc& loc) const;
- DiskLoc getPrevRecord(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc getNextRecord(OperationContext* opCtx, const DiskLoc& loc) const;
+ DiskLoc getPrevRecord(OperationContext* opCtx, const DiskLoc& loc) const;
- DiskLoc getNextRecordInExtent(OperationContext* txn, const DiskLoc& loc) const;
- DiskLoc getPrevRecordInExtent(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc getNextRecordInExtent(OperationContext* opCtx, const DiskLoc& loc) const;
+ DiskLoc getPrevRecordInExtent(OperationContext* opCtx, const DiskLoc& loc) const;
/**
* Quantize 'minSize' to the nearest allocation size.
@@ -255,9 +255,9 @@ public:
/* return which "deleted bucket" for this size object */
static int bucket(int size);
- void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const override {}
+ void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override {}
- virtual void updateStatsAfterRepair(OperationContext* txn,
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize) {
invariant(false); // MMAPv1 has its own repair which doesn't call this.
@@ -272,43 +272,43 @@ protected:
virtual bool shouldPadInserts() const = 0;
- virtual StatusWith<DiskLoc> allocRecord(OperationContext* txn,
+ virtual StatusWith<DiskLoc> allocRecord(OperationContext* opCtx,
int lengthWithHeaders,
bool enforceQuota) = 0;
// TODO: document, remove, what have you
- virtual void addDeletedRec(OperationContext* txn, const DiskLoc& dloc) = 0;
+ virtual void addDeletedRec(OperationContext* opCtx, const DiskLoc& dloc) = 0;
// TODO: another sad one
virtual DeletedRecord* drec(const DiskLoc& loc) const;
// just a wrapper for _extentManager->getExtent( loc );
- Extent* _getExtent(OperationContext* txn, const DiskLoc& loc) const;
+ Extent* _getExtent(OperationContext* opCtx, const DiskLoc& loc) const;
- DiskLoc _getExtentLocForRecord(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc _getExtentLocForRecord(OperationContext* opCtx, const DiskLoc& loc) const;
- DiskLoc _getNextRecord(OperationContext* txn, const DiskLoc& loc) const;
- DiskLoc _getPrevRecord(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc _getNextRecord(OperationContext* opCtx, const DiskLoc& loc) const;
+ DiskLoc _getPrevRecord(OperationContext* opCtx, const DiskLoc& loc) const;
- DiskLoc _getNextRecordInExtent(OperationContext* txn, const DiskLoc& loc) const;
- DiskLoc _getPrevRecordInExtent(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc _getNextRecordInExtent(OperationContext* opCtx, const DiskLoc& loc) const;
+ DiskLoc _getPrevRecordInExtent(OperationContext* opCtx, const DiskLoc& loc) const;
/**
* finds the first suitable DiskLoc for data
* will return the DiskLoc of a newly created DeletedRecord
*/
- DiskLoc _findFirstSpot(OperationContext* txn, const DiskLoc& extDiskLoc, Extent* e);
+ DiskLoc _findFirstSpot(OperationContext* opCtx, const DiskLoc& extDiskLoc, Extent* e);
/** add a record to the end of the linked list chain within this extent.
require: you must have already declared write intent for the record header.
*/
- void _addRecordToRecListInExtent(OperationContext* txn, MmapV1RecordHeader* r, DiskLoc loc);
+ void _addRecordToRecListInExtent(OperationContext* opCtx, MmapV1RecordHeader* r, DiskLoc loc);
/**
* internal
* doesn't check inputs or change padding
*/
- StatusWith<RecordId> _insertRecord(OperationContext* txn,
+ StatusWith<RecordId> _insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota);
@@ -327,23 +327,23 @@ protected:
*/
class RecordStoreV1Base::IntraExtentIterator final : public RecordCursor {
public:
- IntraExtentIterator(OperationContext* txn,
+ IntraExtentIterator(OperationContext* opCtx,
DiskLoc start,
const RecordStoreV1Base* rs,
bool forward = true)
- : _txn(txn), _curr(start), _rs(rs), _forward(forward) {}
+ : _opCtx(opCtx), _curr(start), _rs(rs), _forward(forward) {}
boost::optional<Record> next() final;
- void invalidate(OperationContext* txn, const RecordId& dl) final;
+ void invalidate(OperationContext* opCtx, const RecordId& dl) final;
void save() final {}
bool restore() final {
return true;
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
std::unique_ptr<RecordFetcher> fetcherForNext() const final;
@@ -354,7 +354,7 @@ private:
void advance();
- OperationContext* _txn;
+ OperationContext* _opCtx;
DiskLoc _curr;
const RecordStoreV1Base* _rs;
bool _forward;
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
index a4c62cf2d63..47b65e7dd16 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
@@ -68,14 +68,14 @@ using std::endl;
using std::hex;
using std::vector;
-CappedRecordStoreV1::CappedRecordStoreV1(OperationContext* txn,
+CappedRecordStoreV1::CappedRecordStoreV1(OperationContext* opCtx,
CappedCallback* collection,
StringData ns,
RecordStoreV1MetaData* details,
ExtentManager* em,
bool isSystemIndexes)
: RecordStoreV1Base(ns, details, em, isSystemIndexes), _cappedCallback(collection) {
- DiskLoc extentLoc = details->firstExtent(txn);
+ DiskLoc extentLoc = details->firstExtent(opCtx);
while (!extentLoc.isNull()) {
_extentAdvice.push_back(_extentManager->cacheHint(extentLoc, ExtentManager::Sequential));
Extent* extent = em->getExtent(extentLoc);
@@ -83,12 +83,12 @@ CappedRecordStoreV1::CappedRecordStoreV1(OperationContext* txn,
}
// this is for VERY VERY old versions of capped collections
- cappedCheckMigrate(txn);
+ cappedCheckMigrate(opCtx);
}
CappedRecordStoreV1::~CappedRecordStoreV1() {}
-StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
+StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* opCtx,
int lenToAlloc,
bool enforceQuota) {
{
@@ -100,12 +100,12 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
// the extent check is a way to try and improve performance
// since we have to iterate all the extents (for now) to get
// storage size
- if (lenToAlloc > storageSize(txn)) {
+ if (lenToAlloc > storageSize(opCtx)) {
return StatusWith<DiskLoc>(
ErrorCodes::DocTooLargeForCapped,
mongoutils::str::stream() << "document is larger than capped size " << lenToAlloc
<< " > "
- << storageSize(txn),
+ << storageSize(opCtx),
16328);
}
}
@@ -114,7 +114,7 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
// signal done allocating new extents.
if (!cappedLastDelRecLastExtent().isValid())
- setLastDelRecLastExtent(txn, DiskLoc());
+ setLastDelRecLastExtent(opCtx, DiskLoc());
invariant(lenToAlloc < 400000000);
int passes = 0;
@@ -128,17 +128,17 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
DiskLoc firstEmptyExtent; // This prevents us from infinite looping.
while (1) {
if (_details->numRecords() < _details->maxCappedDocs()) {
- loc = __capAlloc(txn, lenToAlloc);
+ loc = __capAlloc(opCtx, lenToAlloc);
if (!loc.isNull())
break;
}
// If on first iteration through extents, don't delete anything.
if (!_details->capFirstNewRecord().isValid()) {
- advanceCapExtent(txn, _ns);
+ advanceCapExtent(opCtx, _ns);
- if (_details->capExtent() != _details->firstExtent(txn))
- _details->setCapFirstNewRecord(txn, DiskLoc().setInvalid());
+ if (_details->capExtent() != _details->firstExtent(opCtx))
+ _details->setCapFirstNewRecord(opCtx, DiskLoc().setInvalid());
// else signal done with first iteration through extents.
continue;
}
@@ -147,37 +147,37 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
theCapExtent()->firstRecord == _details->capFirstNewRecord()) {
// We've deleted all records that were allocated on the previous
// iteration through this extent.
- advanceCapExtent(txn, _ns);
+ advanceCapExtent(opCtx, _ns);
continue;
}
if (theCapExtent()->firstRecord.isNull()) {
if (firstEmptyExtent.isNull())
firstEmptyExtent = _details->capExtent();
- advanceCapExtent(txn, _ns);
+ advanceCapExtent(opCtx, _ns);
if (firstEmptyExtent == _details->capExtent()) {
// All records have been deleted but there is still no room for this record.
// Nothing we can do but fail.
- _maybeComplain(txn, lenToAlloc);
+ _maybeComplain(opCtx, lenToAlloc);
return StatusWith<DiskLoc>(ErrorCodes::DocTooLargeForCapped,
str::stream()
<< "document doesn't fit in capped collection."
<< " size: "
<< lenToAlloc
<< " storageSize:"
- << storageSize(txn),
+ << storageSize(opCtx),
28575);
}
continue;
}
const RecordId fr = theCapExtent()->firstRecord.toRecordId();
- Status status = _cappedCallback->aboutToDeleteCapped(txn, fr, dataFor(txn, fr));
+ Status status = _cappedCallback->aboutToDeleteCapped(opCtx, fr, dataFor(opCtx, fr));
if (!status.isOK())
return StatusWith<DiskLoc>(status);
- deleteRecord(txn, fr);
+ deleteRecord(opCtx, fr);
- _compact(txn);
+ _compact(opCtx);
if ((++passes % 5000) == 0) {
StringBuilder sb;
log() << "passes = " << passes << " in CappedRecordStoreV1::allocRecord:"
@@ -191,7 +191,7 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
// Remember first record allocated on this iteration through capExtent.
if (_details->capFirstNewRecord().isValid() && _details->capFirstNewRecord().isNull())
- _details->setCapFirstNewRecord(txn, loc);
+ _details->setCapFirstNewRecord(opCtx, loc);
}
invariant(!loc.isNull());
@@ -208,53 +208,55 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
int left = regionlen - lenToAlloc;
/* split off some for further use. */
- txn->recoveryUnit()->writingInt(r->lengthWithHeaders()) = lenToAlloc;
+ opCtx->recoveryUnit()->writingInt(r->lengthWithHeaders()) = lenToAlloc;
DiskLoc newDelLoc = loc;
newDelLoc.inc(lenToAlloc);
DeletedRecord* newDel = drec(newDelLoc);
- DeletedRecord* newDelW = txn->recoveryUnit()->writing(newDel);
+ DeletedRecord* newDelW = opCtx->recoveryUnit()->writing(newDel);
newDelW->extentOfs() = r->extentOfs();
newDelW->lengthWithHeaders() = left;
newDelW->nextDeleted().Null();
- addDeletedRec(txn, newDelLoc);
+ addDeletedRec(opCtx, newDelLoc);
return StatusWith<DiskLoc>(loc);
}
-Status CappedRecordStoreV1::truncate(OperationContext* txn) {
- setLastDelRecLastExtent(txn, DiskLoc());
- setListOfAllDeletedRecords(txn, DiskLoc());
+Status CappedRecordStoreV1::truncate(OperationContext* opCtx) {
+ setLastDelRecLastExtent(opCtx, DiskLoc());
+ setListOfAllDeletedRecords(opCtx, DiskLoc());
// preserve firstExtent/lastExtent
- _details->setCapExtent(txn, _details->firstExtent(txn));
- _details->setStats(txn, 0, 0);
+ _details->setCapExtent(opCtx, _details->firstExtent(opCtx));
+ _details->setStats(opCtx, 0, 0);
// preserve lastExtentSize
// nIndexes preserve 0
// capped preserve true
// max preserve
// paddingFactor is unused
- _details->setCapFirstNewRecord(txn, DiskLoc().setInvalid());
- setLastDelRecLastExtent(txn, DiskLoc().setInvalid());
+ _details->setCapFirstNewRecord(opCtx, DiskLoc().setInvalid());
+ setLastDelRecLastExtent(opCtx, DiskLoc().setInvalid());
// dataFileVersion preserve
// indexFileVersion preserve
// Reset all existing extents and recreate the deleted list.
Extent* ext;
- for (DiskLoc extLoc = _details->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
+ for (DiskLoc extLoc = _details->firstExtent(opCtx); !extLoc.isNull(); extLoc = ext->xnext) {
ext = _extentManager->getExtent(extLoc);
- txn->recoveryUnit()->writing(&ext->firstRecord)->Null();
- txn->recoveryUnit()->writing(&ext->lastRecord)->Null();
+ opCtx->recoveryUnit()->writing(&ext->firstRecord)->Null();
+ opCtx->recoveryUnit()->writing(&ext->lastRecord)->Null();
- addDeletedRec(txn, _findFirstSpot(txn, extLoc, ext));
+ addDeletedRec(opCtx, _findFirstSpot(opCtx, extLoc, ext));
}
return Status::OK();
}
-void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
- cappedTruncateAfter(txn, _ns.c_str(), DiskLoc::fromRecordId(end), inclusive);
+void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* opCtx,
+ RecordId end,
+ bool inclusive) {
+ cappedTruncateAfter(opCtx, _ns.c_str(), DiskLoc::fromRecordId(end), inclusive);
}
/* combine adjacent deleted records *for the current extent* of the capped collection
@@ -262,7 +264,7 @@ void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn, RecordId en
this is O(n^2) but we call it for capped tables where typically n==1 or 2!
(or 3...there will be a little unused sliver at the end of the extent.)
*/
-void CappedRecordStoreV1::_compact(OperationContext* txn) {
+void CappedRecordStoreV1::_compact(OperationContext* opCtx) {
DDD("CappedRecordStoreV1::compact enter");
vector<DiskLoc> drecs;
@@ -274,7 +276,7 @@ void CappedRecordStoreV1::_compact(OperationContext* txn) {
drecs.push_back(i);
}
- setFirstDeletedInCurExtent(txn, i);
+ setFirstDeletedInCurExtent(opCtx, i);
std::sort(drecs.begin(), drecs.end());
DDD("\t drecs.size(): " << drecs.size());
@@ -286,24 +288,24 @@ void CappedRecordStoreV1::_compact(OperationContext* txn) {
j++;
if (j == drecs.end()) {
DDD("\t compact adddelrec");
- addDeletedRec(txn, a);
+ addDeletedRec(opCtx, a);
break;
}
DiskLoc b = *j;
while (a.a() == b.a() && a.getOfs() + drec(a)->lengthWithHeaders() == b.getOfs()) {
// a & b are adjacent. merge.
- txn->recoveryUnit()->writingInt(drec(a)->lengthWithHeaders()) +=
+ opCtx->recoveryUnit()->writingInt(drec(a)->lengthWithHeaders()) +=
drec(b)->lengthWithHeaders();
j++;
if (j == drecs.end()) {
DDD("\t compact adddelrec2");
- addDeletedRec(txn, a);
+ addDeletedRec(opCtx, a);
return;
}
b = *j;
}
DDD("\t compact adddelrec3");
- addDeletedRec(txn, a);
+ addDeletedRec(opCtx, a);
a = b;
}
}
@@ -315,18 +317,18 @@ DiskLoc CappedRecordStoreV1::cappedFirstDeletedInCurExtent() const {
return drec(cappedLastDelRecLastExtent())->nextDeleted();
}
-void CappedRecordStoreV1::setFirstDeletedInCurExtent(OperationContext* txn, const DiskLoc& loc) {
+void CappedRecordStoreV1::setFirstDeletedInCurExtent(OperationContext* opCtx, const DiskLoc& loc) {
if (cappedLastDelRecLastExtent().isNull())
- setListOfAllDeletedRecords(txn, loc);
+ setListOfAllDeletedRecords(opCtx, loc);
else
- *txn->recoveryUnit()->writing(&drec(cappedLastDelRecLastExtent())->nextDeleted()) = loc;
+ *opCtx->recoveryUnit()->writing(&drec(cappedLastDelRecLastExtent())->nextDeleted()) = loc;
}
-void CappedRecordStoreV1::cappedCheckMigrate(OperationContext* txn) {
+void CappedRecordStoreV1::cappedCheckMigrate(OperationContext* opCtx) {
// migrate old RecordStoreV1MetaData format
if (_details->capExtent().a() == 0 && _details->capExtent().getOfs() == 0) {
- WriteUnitOfWork wunit(txn);
- _details->setCapFirstNewRecord(txn, DiskLoc().setInvalid());
+ WriteUnitOfWork wunit(opCtx);
+ _details->setCapFirstNewRecord(opCtx, DiskLoc().setInvalid());
// put all the DeletedRecords in cappedListOfAllDeletedRecords()
for (int i = 1; i < Buckets; ++i) {
DiskLoc first = _details->deletedListEntry(i);
@@ -335,15 +337,15 @@ void CappedRecordStoreV1::cappedCheckMigrate(OperationContext* txn) {
DiskLoc last = first;
for (; !drec(last)->nextDeleted().isNull(); last = drec(last)->nextDeleted())
;
- *txn->recoveryUnit()->writing(&drec(last)->nextDeleted()) =
+ *opCtx->recoveryUnit()->writing(&drec(last)->nextDeleted()) =
cappedListOfAllDeletedRecords();
- setListOfAllDeletedRecords(txn, first);
- _details->setDeletedListEntry(txn, i, DiskLoc());
+ setListOfAllDeletedRecords(opCtx, first);
+ _details->setDeletedListEntry(opCtx, i, DiskLoc());
}
// NOTE cappedLastDelRecLastExtent() set to DiskLoc() in above
// Last, in case we're killed before getting here
- _details->setCapExtent(txn, _details->firstExtent(txn));
+ _details->setCapExtent(opCtx, _details->firstExtent(opCtx));
wunit.commit();
}
}
@@ -370,29 +372,30 @@ bool CappedRecordStoreV1::nextIsInCapExtent(const DiskLoc& dl) const {
return inCapExtent(next);
}
-void CappedRecordStoreV1::advanceCapExtent(OperationContext* txn, StringData ns) {
+void CappedRecordStoreV1::advanceCapExtent(OperationContext* opCtx, StringData ns) {
// We want cappedLastDelRecLastExtent() to be the last DeletedRecord of the prev cap extent
// (or DiskLoc() if new capExtent == firstExtent)
- if (_details->capExtent() == _details->lastExtent(txn))
- setLastDelRecLastExtent(txn, DiskLoc());
+ if (_details->capExtent() == _details->lastExtent(opCtx))
+ setLastDelRecLastExtent(opCtx, DiskLoc());
else {
DiskLoc i = cappedFirstDeletedInCurExtent();
for (; !i.isNull() && nextIsInCapExtent(i); i = drec(i)->nextDeleted())
;
- setLastDelRecLastExtent(txn, i);
+ setLastDelRecLastExtent(opCtx, i);
}
- _details->setCapExtent(
- txn, theCapExtent()->xnext.isNull() ? _details->firstExtent(txn) : theCapExtent()->xnext);
+ _details->setCapExtent(opCtx,
+ theCapExtent()->xnext.isNull() ? _details->firstExtent(opCtx)
+ : theCapExtent()->xnext);
/* this isn't true if a collection has been renamed...that is ok just used for diagnostics */
// dassert( theCapExtent()->ns == ns );
theCapExtent()->assertOk();
- _details->setCapFirstNewRecord(txn, DiskLoc());
+ _details->setCapFirstNewRecord(opCtx, DiskLoc());
}
-DiskLoc CappedRecordStoreV1::__capAlloc(OperationContext* txn, int len) {
+DiskLoc CappedRecordStoreV1::__capAlloc(OperationContext* opCtx, int len) {
DiskLoc prev = cappedLastDelRecLastExtent();
DiskLoc i = cappedFirstDeletedInCurExtent();
DiskLoc ret;
@@ -408,10 +411,10 @@ DiskLoc CappedRecordStoreV1::__capAlloc(OperationContext* txn, int len) {
/* unlink ourself from the deleted list */
if (!ret.isNull()) {
if (prev.isNull())
- setListOfAllDeletedRecords(txn, drec(ret)->nextDeleted());
+ setListOfAllDeletedRecords(opCtx, drec(ret)->nextDeleted());
else
- *txn->recoveryUnit()->writing(&drec(prev)->nextDeleted()) = drec(ret)->nextDeleted();
- *txn->recoveryUnit()->writing(&drec(ret)->nextDeleted()) =
+ *opCtx->recoveryUnit()->writing(&drec(prev)->nextDeleted()) = drec(ret)->nextDeleted();
+ *opCtx->recoveryUnit()->writing(&drec(ret)->nextDeleted()) =
DiskLoc().setInvalid(); // defensive.
invariant(drec(ret)->extentOfs() < ret.getOfs());
}
@@ -419,12 +422,12 @@ DiskLoc CappedRecordStoreV1::__capAlloc(OperationContext* txn, int len) {
return ret;
}
-void CappedRecordStoreV1::cappedTruncateLastDelUpdate(OperationContext* txn) {
- if (_details->capExtent() == _details->firstExtent(txn)) {
+void CappedRecordStoreV1::cappedTruncateLastDelUpdate(OperationContext* opCtx) {
+ if (_details->capExtent() == _details->firstExtent(opCtx)) {
// Only one extent of the collection is in use, so there
// is no deleted record in a previous extent, so nullify
// cappedLastDelRecLastExtent().
- setLastDelRecLastExtent(txn, DiskLoc());
+ setLastDelRecLastExtent(opCtx, DiskLoc());
} else {
// Scan through all deleted records in the collection
// until the last deleted record for the extent prior
@@ -439,11 +442,11 @@ void CappedRecordStoreV1::cappedTruncateLastDelUpdate(OperationContext* txn) {
// record. (We expect that there will be deleted records in the new
// capExtent as well.)
invariant(!drec(i)->nextDeleted().isNull());
- setLastDelRecLastExtent(txn, i);
+ setLastDelRecLastExtent(opCtx, i);
}
}
-void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn,
+void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* opCtx,
const char* ns,
DiskLoc end,
bool inclusive) {
@@ -476,13 +479,13 @@ void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn,
// this case instead of asserting.
uassert(13415, "emptying the collection is not allowed", _details->numRecords() > 1);
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// Delete the newest record, and coalesce the new deleted
// record with existing deleted records.
- Status status = _cappedCallback->aboutToDeleteCapped(txn, currId, dataFor(txn, currId));
+ Status status = _cappedCallback->aboutToDeleteCapped(opCtx, currId, dataFor(opCtx, currId));
uassertStatusOK(status);
- deleteRecord(txn, currId);
- _compact(txn);
+ deleteRecord(opCtx, currId);
+ _compact(opCtx);
// This is the case where we have not yet had to remove any
// documents to make room for other documents, and we are allocating
@@ -497,11 +500,11 @@ void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn,
// NOTE Because we didn't delete the last document, and
// capLooped() is false, capExtent is not the first extent
// so xprev will be nonnull.
- _details->setCapExtent(txn, theCapExtent()->xprev);
+ _details->setCapExtent(opCtx, theCapExtent()->xprev);
theCapExtent()->assertOk();
// update cappedLastDelRecLastExtent()
- cappedTruncateLastDelUpdate(txn);
+ cappedTruncateLastDelUpdate(opCtx);
}
wunit.commit();
continue;
@@ -524,20 +527,20 @@ void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn,
DiskLoc newCapExtent = _details->capExtent();
do {
// Find the previous extent, looping if necessary.
- newCapExtent = (newCapExtent == _details->firstExtent(txn))
- ? _details->lastExtent(txn)
+ newCapExtent = (newCapExtent == _details->firstExtent(opCtx))
+ ? _details->lastExtent(opCtx)
: _extentManager->getExtent(newCapExtent)->xprev;
_extentManager->getExtent(newCapExtent)->assertOk();
} while (_extentManager->getExtent(newCapExtent)->firstRecord.isNull());
- _details->setCapExtent(txn, newCapExtent);
+ _details->setCapExtent(opCtx, newCapExtent);
// Place all documents in the new capExtent on the fresh side
// of the capExtent by setting capFirstNewRecord to the first
// document in the new capExtent.
- _details->setCapFirstNewRecord(txn, theCapExtent()->firstRecord);
+ _details->setCapFirstNewRecord(opCtx, theCapExtent()->firstRecord);
// update cappedLastDelRecLastExtent()
- cappedTruncateLastDelUpdate(txn);
+ cappedTruncateLastDelUpdate(opCtx);
}
wunit.commit();
@@ -548,62 +551,63 @@ DiskLoc CappedRecordStoreV1::cappedListOfAllDeletedRecords() const {
return _details->deletedListEntry(0);
}
-void CappedRecordStoreV1::setListOfAllDeletedRecords(OperationContext* txn, const DiskLoc& loc) {
- return _details->setDeletedListEntry(txn, 0, loc);
+void CappedRecordStoreV1::setListOfAllDeletedRecords(OperationContext* opCtx, const DiskLoc& loc) {
+ return _details->setDeletedListEntry(opCtx, 0, loc);
}
DiskLoc CappedRecordStoreV1::cappedLastDelRecLastExtent() const {
return _details->deletedListEntry(1);
}
-void CappedRecordStoreV1::setLastDelRecLastExtent(OperationContext* txn, const DiskLoc& loc) {
- return _details->setDeletedListEntry(txn, 1, loc);
+void CappedRecordStoreV1::setLastDelRecLastExtent(OperationContext* opCtx, const DiskLoc& loc) {
+ return _details->setDeletedListEntry(opCtx, 1, loc);
}
Extent* CappedRecordStoreV1::theCapExtent() const {
return _extentManager->getExtent(_details->capExtent());
}
-void CappedRecordStoreV1::addDeletedRec(OperationContext* txn, const DiskLoc& dloc) {
- DeletedRecord* d = txn->recoveryUnit()->writing(drec(dloc));
+void CappedRecordStoreV1::addDeletedRec(OperationContext* opCtx, const DiskLoc& dloc) {
+ DeletedRecord* d = opCtx->recoveryUnit()->writing(drec(dloc));
if (!cappedLastDelRecLastExtent().isValid()) {
// Initial extent allocation. Insert at end.
d->nextDeleted() = DiskLoc();
if (cappedListOfAllDeletedRecords().isNull())
- setListOfAllDeletedRecords(txn, dloc);
+ setListOfAllDeletedRecords(opCtx, dloc);
else {
DiskLoc i = cappedListOfAllDeletedRecords();
for (; !drec(i)->nextDeleted().isNull(); i = drec(i)->nextDeleted())
;
- *txn->recoveryUnit()->writing(&drec(i)->nextDeleted()) = dloc;
+ *opCtx->recoveryUnit()->writing(&drec(i)->nextDeleted()) = dloc;
}
} else {
d->nextDeleted() = cappedFirstDeletedInCurExtent();
- setFirstDeletedInCurExtent(txn, dloc);
+ setFirstDeletedInCurExtent(opCtx, dloc);
// always _compact() after this so order doesn't matter
}
}
-std::unique_ptr<SeekableRecordCursor> CappedRecordStoreV1::getCursor(OperationContext* txn,
+std::unique_ptr<SeekableRecordCursor> CappedRecordStoreV1::getCursor(OperationContext* opCtx,
bool forward) const {
- return stdx::make_unique<CappedRecordStoreV1Iterator>(txn, this, forward);
+ return stdx::make_unique<CappedRecordStoreV1Iterator>(opCtx, this, forward);
}
vector<std::unique_ptr<RecordCursor>> CappedRecordStoreV1::getManyCursors(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
vector<std::unique_ptr<RecordCursor>> cursors;
if (!_details->capLooped()) {
// if we haven't looped yet, just spit out all extents (same as non-capped impl)
const Extent* ext;
- for (DiskLoc extLoc = details()->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
- ext = _getExtent(txn, extLoc);
+ for (DiskLoc extLoc = details()->firstExtent(opCtx); !extLoc.isNull();
+ extLoc = ext->xnext) {
+ ext = _getExtent(opCtx, extLoc);
if (ext->firstRecord.isNull())
continue;
cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
- txn, ext->firstRecord, this));
+ opCtx, ext->firstRecord, this));
}
} else {
// if we've looped we need to iterate the extents, starting and ending with the
@@ -615,40 +619,40 @@ vector<std::unique_ptr<RecordCursor>> CappedRecordStoreV1::getManyCursors(
// First do the "old" portion of capExtent if there is any
DiskLoc extLoc = capExtent;
{
- const Extent* ext = _getExtent(txn, extLoc);
+ const Extent* ext = _getExtent(opCtx, extLoc);
if (ext->firstRecord != details()->capFirstNewRecord()) {
// this means there is old data in capExtent
cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
- txn, ext->firstRecord, this));
+ opCtx, ext->firstRecord, this));
}
- extLoc = ext->xnext.isNull() ? details()->firstExtent(txn) : ext->xnext;
+ extLoc = ext->xnext.isNull() ? details()->firstExtent(opCtx) : ext->xnext;
}
// Next handle all the other extents
while (extLoc != capExtent) {
- const Extent* ext = _getExtent(txn, extLoc);
+ const Extent* ext = _getExtent(opCtx, extLoc);
cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
- txn, ext->firstRecord, this));
+ opCtx, ext->firstRecord, this));
- extLoc = ext->xnext.isNull() ? details()->firstExtent(txn) : ext->xnext;
+ extLoc = ext->xnext.isNull() ? details()->firstExtent(opCtx) : ext->xnext;
}
// Finally handle the "new" data in the capExtent
cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
- txn, details()->capFirstNewRecord(), this));
+ opCtx, details()->capFirstNewRecord(), this));
}
return cursors;
}
-void CappedRecordStoreV1::_maybeComplain(OperationContext* txn, int len) const {
+void CappedRecordStoreV1::_maybeComplain(OperationContext* opCtx, int len) const {
RARELY {
std::stringstream buf;
buf << "couldn't make room for record len: " << len << " in capped ns " << _ns << '\n';
- buf << "numRecords: " << numRecords(txn) << '\n';
+ buf << "numRecords: " << numRecords(opCtx) << '\n';
int i = 0;
- for (DiskLoc e = _details->firstExtent(txn); !e.isNull();
+ for (DiskLoc e = _details->firstExtent(opCtx); !e.isNull();
e = _extentManager->getExtent(e)->xnext, ++i) {
buf << " Extent " << i;
if (e == _details->capExtent())
@@ -666,12 +670,13 @@ void CappedRecordStoreV1::_maybeComplain(OperationContext* txn, int len) const {
warning() << buf.str();
// assume it is unusually large record; if not, something is broken
- fassert(17438, len * 5 > _details->lastExtentSize(txn));
+ fassert(17438, len * 5 > _details->lastExtentSize(opCtx));
}
}
-DiskLoc CappedRecordStoreV1::firstRecord(OperationContext* txn, const DiskLoc& startExtent) const {
- for (DiskLoc i = startExtent.isNull() ? _details->firstExtent(txn) : startExtent; !i.isNull();
+DiskLoc CappedRecordStoreV1::firstRecord(OperationContext* opCtx,
+ const DiskLoc& startExtent) const {
+ for (DiskLoc i = startExtent.isNull() ? _details->firstExtent(opCtx) : startExtent; !i.isNull();
i = _extentManager->getExtent(i)->xnext) {
Extent* e = _extentManager->getExtent(i);
@@ -681,8 +686,8 @@ DiskLoc CappedRecordStoreV1::firstRecord(OperationContext* txn, const DiskLoc& s
return DiskLoc();
}
-DiskLoc CappedRecordStoreV1::lastRecord(OperationContext* txn, const DiskLoc& startExtent) const {
- for (DiskLoc i = startExtent.isNull() ? _details->lastExtent(txn) : startExtent; !i.isNull();
+DiskLoc CappedRecordStoreV1::lastRecord(OperationContext* opCtx, const DiskLoc& startExtent) const {
+ for (DiskLoc i = startExtent.isNull() ? _details->lastExtent(opCtx) : startExtent; !i.isNull();
i = _extentManager->getExtent(i)->xprev) {
Extent* e = _extentManager->getExtent(i);
if (!e->lastRecord.isNull())
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.h b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.h
index 3fc64a76cd9..d74fc7c65ea 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.h
@@ -40,7 +40,7 @@ namespace mongo {
class CappedRecordStoreV1 final : public RecordStoreV1Base {
public:
- CappedRecordStoreV1(OperationContext* txn,
+ CappedRecordStoreV1(OperationContext* opCtx,
CappedCallback* collection,
StringData ns,
RecordStoreV1MetaData* details,
@@ -53,7 +53,7 @@ public:
return "CappedRecordStoreV1";
}
- Status truncate(OperationContext* txn) final;
+ Status truncate(OperationContext* opCtx) final;
/**
* Truncate documents newer than the document at 'end' from the capped
@@ -61,17 +61,17 @@ public:
* function. An assertion will be thrown if that is attempted.
* @param inclusive - Truncate 'end' as well iff true
*/
- void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) final;
+ void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) final;
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final;
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const final;
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* opCtx) const final;
// Start from firstExtent by default.
- DiskLoc firstRecord(OperationContext* txn, const DiskLoc& startExtent = DiskLoc()) const;
+ DiskLoc firstRecord(OperationContext* opCtx, const DiskLoc& startExtent = DiskLoc()) const;
// Start from lastExtent by default.
- DiskLoc lastRecord(OperationContext* txn, const DiskLoc& startExtent = DiskLoc()) const;
+ DiskLoc lastRecord(OperationContext* opCtx, const DiskLoc& startExtent = DiskLoc()) const;
protected:
bool isCapped() const final {
@@ -85,28 +85,28 @@ protected:
_cappedCallback = cb;
}
- StatusWith<DiskLoc> allocRecord(OperationContext* txn,
+ StatusWith<DiskLoc> allocRecord(OperationContext* opCtx,
int lengthWithHeaders,
bool enforceQuota) final;
- void addDeletedRec(OperationContext* txn, const DiskLoc& dloc) final;
+ void addDeletedRec(OperationContext* opCtx, const DiskLoc& dloc) final;
private:
// -- start copy from cap.cpp --
- void _compact(OperationContext* txn);
+ void _compact(OperationContext* opCtx);
DiskLoc cappedFirstDeletedInCurExtent() const;
- void setFirstDeletedInCurExtent(OperationContext* txn, const DiskLoc& loc);
- void cappedCheckMigrate(OperationContext* txn);
- DiskLoc __capAlloc(OperationContext* txn, int len);
+ void setFirstDeletedInCurExtent(OperationContext* opCtx, const DiskLoc& loc);
+ void cappedCheckMigrate(OperationContext* opCtx);
+ DiskLoc __capAlloc(OperationContext* opCtx, int len);
bool inCapExtent(const DiskLoc& dl) const;
DiskLoc cappedListOfAllDeletedRecords() const;
DiskLoc cappedLastDelRecLastExtent() const;
- void setListOfAllDeletedRecords(OperationContext* txn, const DiskLoc& loc);
- void setLastDelRecLastExtent(OperationContext* txn, const DiskLoc& loc);
+ void setListOfAllDeletedRecords(OperationContext* opCtx, const DiskLoc& loc);
+ void setLastDelRecLastExtent(OperationContext* opCtx, const DiskLoc& loc);
Extent* theCapExtent() const;
bool nextIsInCapExtent(const DiskLoc& dl) const;
- void advanceCapExtent(OperationContext* txn, StringData ns);
- void cappedTruncateLastDelUpdate(OperationContext* txn);
+ void advanceCapExtent(OperationContext* opCtx, StringData ns);
+ void cappedTruncateLastDelUpdate(OperationContext* opCtx);
/**
* Truncate documents newer than the document at 'end' from the capped
@@ -114,9 +114,9 @@ private:
* function. An assertion will be thrown if that is attempted.
* @param inclusive - Truncate 'end' as well iff true
*/
- void cappedTruncateAfter(OperationContext* txn, const char* ns, DiskLoc end, bool inclusive);
+ void cappedTruncateAfter(OperationContext* opCtx, const char* ns, DiskLoc end, bool inclusive);
- void _maybeComplain(OperationContext* txn, int len) const;
+ void _maybeComplain(OperationContext* opCtx, int len) const;
// -- end copy from cap.cpp --
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp
index cdd8363d949..20324ffe5ee 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp
@@ -39,10 +39,10 @@ namespace mongo {
//
// Capped collection traversal
//
-CappedRecordStoreV1Iterator::CappedRecordStoreV1Iterator(OperationContext* txn,
+CappedRecordStoreV1Iterator::CappedRecordStoreV1Iterator(OperationContext* opCtx,
const CappedRecordStoreV1* collection,
bool forward)
- : _txn(txn), _recordStore(collection), _forward(forward) {
+ : _opCtx(opCtx), _recordStore(collection), _forward(forward) {
const RecordStoreV1MetaData* nsd = _recordStore->details();
// If a start position isn't specified, we fill one out from the start of the
@@ -51,7 +51,7 @@ CappedRecordStoreV1Iterator::CappedRecordStoreV1Iterator(OperationContext* txn,
// Going forwards.
if (!nsd->capLooped()) {
// If our capped collection doesn't loop around, the first record is easy.
- _curr = collection->firstRecord(_txn);
+ _curr = collection->firstRecord(_opCtx);
} else {
// Our capped collection has "looped' around.
// Copied verbatim from ForwardCappedCursor::init.
@@ -66,7 +66,7 @@ CappedRecordStoreV1Iterator::CappedRecordStoreV1Iterator(OperationContext* txn,
// Going backwards
if (!nsd->capLooped()) {
// Start at the end.
- _curr = collection->lastRecord(_txn);
+ _curr = collection->lastRecord(_opCtx);
} else {
_curr = _getExtent(nsd->capExtent())->lastRecord;
}
@@ -78,15 +78,15 @@ boost::optional<Record> CappedRecordStoreV1Iterator::next() {
return {};
auto toReturn = _curr.toRecordId();
_curr = getNextCapped(_curr);
- return {{toReturn, _recordStore->RecordStore::dataFor(_txn, toReturn)}};
+ return {{toReturn, _recordStore->RecordStore::dataFor(_opCtx, toReturn)}};
}
boost::optional<Record> CappedRecordStoreV1Iterator::seekExact(const RecordId& id) {
_curr = getNextCapped(DiskLoc::fromRecordId(id));
- return {{id, _recordStore->RecordStore::dataFor(_txn, id)}};
+ return {{id, _recordStore->RecordStore::dataFor(_opCtx, id)}};
}
-void CappedRecordStoreV1Iterator::invalidate(OperationContext* txn, const RecordId& id) {
+void CappedRecordStoreV1Iterator::invalidate(OperationContext* opCtx, const RecordId& id) {
const DiskLoc dl = DiskLoc::fromRecordId(id);
if (dl == _curr) {
// We *could* move to the next thing, since there is actually a next
@@ -179,7 +179,7 @@ DiskLoc CappedRecordStoreV1Iterator::nextLoop(const DiskLoc& prev) {
if (!next.isNull()) {
return next;
}
- return _recordStore->firstRecord(_txn);
+ return _recordStore->firstRecord(_opCtx);
}
DiskLoc CappedRecordStoreV1Iterator::prevLoop(const DiskLoc& curr) {
@@ -188,7 +188,7 @@ DiskLoc CappedRecordStoreV1Iterator::prevLoop(const DiskLoc& curr) {
if (!prev.isNull()) {
return prev;
}
- return _recordStore->lastRecord(_txn);
+ return _recordStore->lastRecord(_opCtx);
}
@@ -197,11 +197,11 @@ Extent* CappedRecordStoreV1Iterator::_getExtent(const DiskLoc& loc) {
}
DiskLoc CappedRecordStoreV1Iterator::_getNextRecord(const DiskLoc& loc) {
- return _recordStore->getNextRecord(_txn, loc);
+ return _recordStore->getNextRecord(_opCtx, loc);
}
DiskLoc CappedRecordStoreV1Iterator::_getPrevRecord(const DiskLoc& loc) {
- return _recordStore->getPrevRecord(_txn, loc);
+ return _recordStore->getPrevRecord(_opCtx, loc);
}
std::unique_ptr<RecordFetcher> CappedRecordStoreV1Iterator::fetcherForNext() const {
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h
index 275c78cae38..08065109c3f 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h
@@ -43,7 +43,7 @@ struct Extent;
*/
class CappedRecordStoreV1Iterator final : public SeekableRecordCursor {
public:
- CappedRecordStoreV1Iterator(OperationContext* txn,
+ CappedRecordStoreV1Iterator(OperationContext* opCtx,
const CappedRecordStoreV1* collection,
bool forward);
@@ -52,12 +52,12 @@ public:
void save() final;
bool restore() final;
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
- void invalidate(OperationContext* txn, const RecordId& dl) final;
+ void invalidate(OperationContext* opCtx, const RecordId& dl) final;
std::unique_ptr<RecordFetcher> fetcherForNext() const final;
std::unique_ptr<RecordFetcher> fetcherForId(const RecordId& id) const final;
@@ -80,7 +80,7 @@ private:
DiskLoc _getPrevRecord(const DiskLoc& loc);
// transactional context for read locks. Not owned by us
- OperationContext* _txn;
+ OperationContext* _opCtx;
// The collection we're iterating over.
const CappedRecordStoreV1* const _recordStore;
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
index 2bde7396e44..de02abcf76b 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
@@ -51,7 +51,7 @@ char zeros[20 * 1024 * 1024] = {};
class DummyCappedCallback : public CappedCallback {
public:
- Status aboutToDeleteCapped(OperationContext* txn, const RecordId& loc, RecordData data) {
+ Status aboutToDeleteCapped(OperationContext* opCtx, const RecordId& loc, RecordData data) {
deleted.push_back(DiskLoc::fromRecordId(loc));
return Status::OK();
}
@@ -62,35 +62,35 @@ public:
};
void simpleInsertTest(const char* buf, int size) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
string myns = "test.simple1";
- CappedRecordStoreV1 rs(&txn, &cb, myns, md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, myns, md, &em, false);
- rs.increaseStorageSize(&txn, 1024, false);
+ rs.increaseStorageSize(&opCtx, 1024, false);
- ASSERT_NOT_OK(rs.insertRecord(&txn, buf, 3, 1000).getStatus());
+ ASSERT_NOT_OK(rs.insertRecord(&opCtx, buf, 3, 1000).getStatus());
- rs.insertRecord(&txn, buf, size, 10000);
+ rs.insertRecord(&opCtx, buf, size, 10000);
{
BSONObjBuilder b;
- int64_t storageSize = rs.storageSize(&txn, &b);
+ int64_t storageSize = rs.storageSize(&opCtx, &b);
BSONObj obj = b.obj();
ASSERT_EQUALS(1, obj["numExtents"].numberInt());
ASSERT_EQUALS(storageSize, em.quantizeExtentSize(1024));
}
for (int i = 0; i < 1000; i++) {
- ASSERT_OK(rs.insertRecord(&txn, buf, size, 10000).getStatus());
+ ASSERT_OK(rs.insertRecord(&opCtx, buf, size, 10000).getStatus());
}
long long start = md->numRecords();
for (int i = 0; i < 1000; i++) {
- ASSERT_OK(rs.insertRecord(&txn, buf, size, 10000).getStatus());
+ ASSERT_OK(rs.insertRecord(&opCtx, buf, size, 10000).getStatus());
}
ASSERT_EQUALS(start, md->numRecords());
ASSERT_GREATER_THAN(start, 100);
@@ -105,37 +105,37 @@ TEST(CappedRecordStoreV1, SimpleInsertSize8) {
}
TEST(CappedRecordStoreV1, EmptySingleExtent) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid());
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 100}, {}};
LocAndSize drecs[] = {{DiskLoc(0, 1100), 900}, {}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc().setInvalid()); // unlooped
}
}
TEST(CappedRecordStoreV1, FirstLoopWithSingleExtentExactSize) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{DiskLoc(0, 1000), 100},
@@ -145,12 +145,12 @@ TEST(CappedRecordStoreV1, FirstLoopWithSingleExtentExactSize) {
{DiskLoc(0, 1400), 100},
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1500), 50}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid()); // unlooped
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
@@ -162,18 +162,18 @@ TEST(CappedRecordStoreV1, FirstLoopWithSingleExtentExactSize) {
{DiskLoc(0, 1100), 100}, // gap after newest record XXX this is probably a bug
{DiskLoc(0, 1500), 50}, // gap at end of extent
{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
}
TEST(CappedRecordStoreV1, NonFirstLoopWithSingleExtentExactSize) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{DiskLoc(0, 1000), 100},
@@ -183,12 +183,12 @@ TEST(CappedRecordStoreV1, NonFirstLoopWithSingleExtentExactSize) {
{DiskLoc(0, 1400), 100},
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1500), 50}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc(0, 1000));
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
@@ -200,7 +200,7 @@ TEST(CappedRecordStoreV1, NonFirstLoopWithSingleExtentExactSize) {
{DiskLoc(0, 1100), 100}, // gap after newest record XXX this is probably a bug
{DiskLoc(0, 1500), 50}, // gap at end of extent
{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
@@ -210,11 +210,11 @@ TEST(CappedRecordStoreV1, NonFirstLoopWithSingleExtentExactSize) {
* Current code always tries to leave 24 bytes to create a DeletedRecord.
*/
TEST(CappedRecordStoreV1, WillLoopWithout24SpareBytes) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{DiskLoc(0, 1000), 100},
@@ -224,12 +224,12 @@ TEST(CappedRecordStoreV1, WillLoopWithout24SpareBytes) {
{DiskLoc(0, 1400), 100},
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1500), 123}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc(0, 1000));
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
@@ -240,18 +240,18 @@ TEST(CappedRecordStoreV1, WillLoopWithout24SpareBytes) {
LocAndSize drecs[] = {{DiskLoc(0, 1100), 100}, // gap after newest record
{DiskLoc(0, 1500), 123}, // gap at end of extent
{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
}
TEST(CappedRecordStoreV1, WontLoopWith24SpareBytes) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{DiskLoc(0, 1000), 100},
@@ -261,12 +261,12 @@ TEST(CappedRecordStoreV1, WontLoopWith24SpareBytes) {
{DiskLoc(0, 1400), 100},
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1500), 124}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc(0, 1000));
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 100},
@@ -278,30 +278,30 @@ TEST(CappedRecordStoreV1, WontLoopWith24SpareBytes) {
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1600), 24}, // gap at end of extent
{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
}
TEST(CappedRecordStoreV1, MoveToSecondExtentUnLooped) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
// Two extents, each with 1000 bytes.
LocAndSize records[] = {
{DiskLoc(0, 1000), 500}, {DiskLoc(0, 1500), 300}, {DiskLoc(0, 1800), 100}, {}};
LocAndSize drecs[] = {{DiskLoc(0, 1900), 100}, {DiskLoc(1, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid());
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 500},
@@ -311,18 +311,18 @@ TEST(CappedRecordStoreV1, MoveToSecondExtentUnLooped) {
{DiskLoc(1, 1000), 100},
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1900), 100}, {DiskLoc(1, 1100), 900}, {}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(1, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc().setInvalid()); // unlooped
}
}
TEST(CappedRecordStoreV1, MoveToSecondExtentLooped) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
// Two extents, each with 1000 bytes.
@@ -334,12 +334,12 @@ TEST(CappedRecordStoreV1, MoveToSecondExtentLooped) {
{DiskLoc(1, 1300), 600},
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1900), 100}, {DiskLoc(1, 1900), 100}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc(0, 1000));
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 500},
@@ -350,7 +350,7 @@ TEST(CappedRecordStoreV1, MoveToSecondExtentLooped) {
{}};
LocAndSize drecs[] = {
{DiskLoc(0, 1800), 200}, {DiskLoc(1, 1200), 100}, {DiskLoc(1, 1900), 100}, {}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(1, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(1, 1000));
}
@@ -358,43 +358,43 @@ TEST(CappedRecordStoreV1, MoveToSecondExtentLooped) {
// Larger than storageSize (fails early)
TEST(CappedRecordStoreV1, OversizedRecordHuge) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid());
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- StatusWith<RecordId> status = rs.insertRecord(&txn, zeros, 16000, false);
+ StatusWith<RecordId> status = rs.insertRecord(&opCtx, zeros, 16000, false);
ASSERT_EQUALS(status.getStatus(), ErrorCodes::DocTooLargeForCapped);
ASSERT_EQUALS(status.getStatus().location(), 16328);
}
// Smaller than storageSize, but larger than usable space (fails late)
TEST(CappedRecordStoreV1, OversizedRecordMedium) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid());
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
StatusWith<RecordId> status =
- rs.insertRecord(&txn, zeros, 1004 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 1004 - MmapV1RecordHeader::HeaderSize, false);
ASSERT_EQUALS(status.getStatus(), ErrorCodes::DocTooLargeForCapped);
ASSERT_EQUALS(status.getStatus().location(), 28575);
}
@@ -409,28 +409,28 @@ TEST(CappedRecordStoreV1, OversizedRecordMedium) {
* This is a minimal example that shows the current allocator laying out records out-of-order.
*/
TEST(CappedRecordStoreV1Scrambler, Minimal) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
// Starting with a single empty 1000 byte extent.
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid()); // unlooped
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&txn, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
rs.insertRecord(
- &txn, zeros, 400 - MmapV1RecordHeader::HeaderSize, false); // won't fit at end so wraps
- rs.insertRecord(&txn, zeros, 120 - MmapV1RecordHeader::HeaderSize, false); // fits at end
+ &opCtx, zeros, 400 - MmapV1RecordHeader::HeaderSize, false); // won't fit at end so wraps
+ rs.insertRecord(&opCtx, zeros, 120 - MmapV1RecordHeader::HeaderSize, false); // fits at end
rs.insertRecord(
- &txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false); // fits in earlier hole
+ &opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false); // fits in earlier hole
{
LocAndSize recs[] = {{DiskLoc(0, 1500), 300}, // 2nd insert
@@ -439,7 +439,7 @@ TEST(CappedRecordStoreV1Scrambler, Minimal) {
{DiskLoc(0, 1400), 60}, // 5th
{}};
LocAndSize drecs[] = {{DiskLoc(0, 1460), 40}, {DiskLoc(0, 1920), 80}, {}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
@@ -450,51 +450,51 @@ TEST(CappedRecordStoreV1Scrambler, Minimal) {
* that leaves 4 deleted records in a single extent.
*/
TEST(CappedRecordStoreV1Scrambler, FourDeletedRecordsInSingleExtent) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
DummyCappedCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ CappedRecordStoreV1 rs(&opCtx, &cb, "test.foo", md, &em, false);
{
// Starting with a single empty 1000 byte extent.
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid()); // unlooped
+ initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
// This list of sizes was empirically generated to achieve this outcome. Don't think too
// much about them.
- rs.insertRecord(&txn, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 304 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 56 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 104 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 36 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 64 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 304 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 56 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 104 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 36 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 64 - MmapV1RecordHeader::HeaderSize, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1148), 148},
@@ -512,7 +512,7 @@ TEST(CappedRecordStoreV1Scrambler, FourDeletedRecordsInSingleExtent) {
{DiskLoc(0, 1912), 24},
{DiskLoc(0, 1628), 84},
{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
@@ -526,7 +526,7 @@ TEST(CappedRecordStoreV1Scrambler, FourDeletedRecordsInSingleExtent) {
class CollscanHelper {
public:
CollscanHelper(int nExtents)
- : md(new DummyRecordStoreV1MetaData(true, 0)), rs(&txn, &cb, ns(), md, &em, false) {
+ : md(new DummyRecordStoreV1MetaData(true, 0)), rs(&opCtx, &cb, ns(), md, &em, false) {
LocAndSize recs[] = {{}};
LocAndSize drecs[8];
ASSERT_LESS_THAN(nExtents, 8);
@@ -537,9 +537,9 @@ public:
drecs[nExtents].loc = DiskLoc();
drecs[nExtents].size = 0;
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
- initializeV1RS(&txn, recs, drecs, NULL, &em, md);
+ md->setCapExtent(&opCtx, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&opCtx, DiskLoc().setInvalid()); // unlooped
+ initializeV1RS(&opCtx, recs, drecs, NULL, &em, md);
}
// Insert bypasses standard alloc/insert routines to use the extent we want.
@@ -551,7 +551,7 @@ public:
BSONObj o = b.done();
int len = o.objsize();
Extent* e = em.getExtent(ext);
- e = txn.recoveryUnit()->writing(e);
+ e = opCtx.recoveryUnit()->writing(e);
int ofs;
if (e->lastRecord.isNull()) {
ofs = ext.getOfs() + (e->_extentData - (char*)e);
@@ -560,7 +560,7 @@ public:
}
DiskLoc dl(ext.a(), ofs);
MmapV1RecordHeader* r = em.recordForV1(dl);
- r = (MmapV1RecordHeader*)txn.recoveryUnit()->writingPtr(
+ r = (MmapV1RecordHeader*)opCtx.recoveryUnit()->writingPtr(
r, MmapV1RecordHeader::HeaderSize + len);
r->lengthWithHeaders() = MmapV1RecordHeader::HeaderSize + len;
r->extentOfs() = e->myLoc.getOfs();
@@ -570,7 +570,7 @@ public:
if (e->firstRecord.isNull())
e->firstRecord = dl;
else
- txn.recoveryUnit()->writingInt(em.recordForV1(e->lastRecord)->nextOfs()) = ofs;
+ opCtx.recoveryUnit()->writingInt(em.recordForV1(e->lastRecord)->nextOfs()) = ofs;
e->lastRecord = dl;
return dl;
}
@@ -579,7 +579,7 @@ public:
void walkAndCount(int expectedCount) {
// Walk the collection going forward.
{
- CappedRecordStoreV1Iterator cursor(&txn, &rs, /*forward=*/true);
+ CappedRecordStoreV1Iterator cursor(&opCtx, &rs, /*forward=*/true);
int resultCount = 0;
while (auto record = cursor.next()) {
++resultCount;
@@ -590,7 +590,7 @@ public:
// Walk the collection going backwards.
{
- CappedRecordStoreV1Iterator cursor(&txn, &rs, /*forward=*/false);
+ CappedRecordStoreV1Iterator cursor(&opCtx, &rs, /*forward=*/false);
int resultCount = expectedCount;
while (auto record = cursor.next()) {
--resultCount;
@@ -604,7 +604,7 @@ public:
return "unittests.QueryStageCollectionScanCapped";
}
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyRecordStoreV1MetaData* md;
DummyExtentManager em;
@@ -621,27 +621,27 @@ TEST(CappedRecordStoreV1QueryStage, CollscanCappedBase) {
TEST(CappedRecordStoreV1QueryStage, CollscanEmptyLooped) {
CollscanHelper h(1);
- h.md->setCapFirstNewRecord(&h.txn, DiskLoc());
+ h.md->setCapFirstNewRecord(&h.opCtx, DiskLoc());
h.walkAndCount(0);
}
TEST(CappedRecordStoreV1QueryStage, CollscanEmptyMultiExtentLooped) {
CollscanHelper h(3);
- h.md->setCapFirstNewRecord(&h.txn, DiskLoc());
+ h.md->setCapFirstNewRecord(&h.opCtx, DiskLoc());
h.walkAndCount(0);
}
TEST(CappedRecordStoreV1QueryStage, CollscanSingle) {
CollscanHelper h(1);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 0));
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 0));
h.walkAndCount(1);
}
TEST(CappedRecordStoreV1QueryStage, CollscanNewCapFirst) {
CollscanHelper h(1);
DiskLoc x = h.insert(h.md->capExtent(), 0);
- h.md->setCapFirstNewRecord(&h.txn, x);
+ h.md->setCapFirstNewRecord(&h.opCtx, x);
h.insert(h.md->capExtent(), 1);
h.walkAndCount(2);
}
@@ -649,7 +649,7 @@ TEST(CappedRecordStoreV1QueryStage, CollscanNewCapFirst) {
TEST(CappedRecordStoreV1QueryStage, CollscanNewCapMiddle) {
CollscanHelper h(1);
h.insert(h.md->capExtent(), 0);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 1));
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 1));
h.insert(h.md->capExtent(), 2);
h.walkAndCount(3);
}
@@ -657,59 +657,59 @@ TEST(CappedRecordStoreV1QueryStage, CollscanNewCapMiddle) {
TEST(CappedRecordStoreV1QueryStage, CollscanFirstExtent) {
CollscanHelper h(2);
h.insert(h.md->capExtent(), 0);
- h.insert(h.md->lastExtent(&h.txn), 1);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 2));
+ h.insert(h.md->lastExtent(&h.opCtx), 1);
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 2));
h.insert(h.md->capExtent(), 3);
h.walkAndCount(4);
}
TEST(CappedRecordStoreV1QueryStage, CollscanLastExtent) {
CollscanHelper h(2);
- h.md->setCapExtent(&h.txn, h.md->lastExtent(&h.txn));
+ h.md->setCapExtent(&h.opCtx, h.md->lastExtent(&h.opCtx));
h.insert(h.md->capExtent(), 0);
- h.insert(h.md->firstExtent(&h.txn), 1);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 2));
+ h.insert(h.md->firstExtent(&h.opCtx), 1);
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 2));
h.insert(h.md->capExtent(), 3);
h.walkAndCount(4);
}
TEST(CappedRecordStoreV1QueryStage, CollscanMidExtent) {
CollscanHelper h(3);
- h.md->setCapExtent(&h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext);
+ h.md->setCapExtent(&h.opCtx, h.em.getExtent(h.md->firstExtent(&h.opCtx))->xnext);
h.insert(h.md->capExtent(), 0);
- h.insert(h.md->lastExtent(&h.txn), 1);
- h.insert(h.md->firstExtent(&h.txn), 2);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 3));
+ h.insert(h.md->lastExtent(&h.opCtx), 1);
+ h.insert(h.md->firstExtent(&h.opCtx), 2);
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 3));
h.insert(h.md->capExtent(), 4);
h.walkAndCount(5);
}
TEST(CappedRecordStoreV1QueryStage, CollscanAloneInExtent) {
CollscanHelper h(3);
- h.md->setCapExtent(&h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext);
- h.insert(h.md->lastExtent(&h.txn), 0);
- h.insert(h.md->firstExtent(&h.txn), 1);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 2));
+ h.md->setCapExtent(&h.opCtx, h.em.getExtent(h.md->firstExtent(&h.opCtx))->xnext);
+ h.insert(h.md->lastExtent(&h.opCtx), 0);
+ h.insert(h.md->firstExtent(&h.opCtx), 1);
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 2));
h.walkAndCount(3);
}
TEST(CappedRecordStoreV1QueryStage, CollscanFirstInExtent) {
CollscanHelper h(3);
- h.md->setCapExtent(&h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext);
- h.insert(h.md->lastExtent(&h.txn), 0);
- h.insert(h.md->firstExtent(&h.txn), 1);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 2));
+ h.md->setCapExtent(&h.opCtx, h.em.getExtent(h.md->firstExtent(&h.opCtx))->xnext);
+ h.insert(h.md->lastExtent(&h.opCtx), 0);
+ h.insert(h.md->firstExtent(&h.opCtx), 1);
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 2));
h.insert(h.md->capExtent(), 3);
h.walkAndCount(4);
}
TEST(CappedRecordStoreV1QueryStage, CollscanLastInExtent) {
CollscanHelper h(3);
- h.md->setCapExtent(&h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext);
+ h.md->setCapExtent(&h.opCtx, h.em.getExtent(h.md->firstExtent(&h.opCtx))->xnext);
h.insert(h.md->capExtent(), 0);
- h.insert(h.md->lastExtent(&h.txn), 1);
- h.insert(h.md->firstExtent(&h.txn), 2);
- h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 3));
+ h.insert(h.md->lastExtent(&h.opCtx), 1);
+ h.insert(h.md->firstExtent(&h.opCtx), 2);
+ h.md->setCapFirstNewRecord(&h.opCtx, h.insert(h.md->capExtent(), 3));
h.walkAndCount(4);
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp
index ac8f083eb82..872c29e112b 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp
@@ -40,9 +40,9 @@ namespace mongo {
using std::endl;
-RecordStoreV1RepairCursor::RecordStoreV1RepairCursor(OperationContext* txn,
+RecordStoreV1RepairCursor::RecordStoreV1RepairCursor(OperationContext* opCtx,
const RecordStoreV1Base* recordStore)
- : _txn(txn), _recordStore(recordStore), _stage(FORWARD_SCAN) {
+ : _opCtx(opCtx), _recordStore(recordStore), _stage(FORWARD_SCAN) {
// Position the iterator at the first record
//
advance();
@@ -53,7 +53,7 @@ boost::optional<Record> RecordStoreV1RepairCursor::next() {
return {};
auto out = _currRecord.toRecordId();
advance();
- return {{out, _recordStore->dataFor(_txn, out)}};
+ return {{out, _recordStore->dataFor(_opCtx, out)}};
}
void RecordStoreV1RepairCursor::advance() {
@@ -76,10 +76,10 @@ void RecordStoreV1RepairCursor::advance() {
} else {
switch (_stage) {
case FORWARD_SCAN:
- _currRecord = _recordStore->getNextRecordInExtent(_txn, _currRecord);
+ _currRecord = _recordStore->getNextRecordInExtent(_opCtx, _currRecord);
break;
case BACKWARD_SCAN:
- _currRecord = _recordStore->getPrevRecordInExtent(_txn, _currRecord);
+ _currRecord = _recordStore->getPrevRecordInExtent(_opCtx, _currRecord);
break;
default:
invariant(!"This should never be reached.");
@@ -116,10 +116,10 @@ bool RecordStoreV1RepairCursor::_advanceToNextValidExtent() {
if (_currExtent.isNull()) {
switch (_stage) {
case FORWARD_SCAN:
- _currExtent = _recordStore->details()->firstExtent(_txn);
+ _currExtent = _recordStore->details()->firstExtent(_opCtx);
break;
case BACKWARD_SCAN:
- _currExtent = _recordStore->details()->lastExtent(_txn);
+ _currExtent = _recordStore->details()->lastExtent(_opCtx);
break;
default:
invariant(DONE == _stage);
@@ -181,7 +181,7 @@ bool RecordStoreV1RepairCursor::_advanceToNextValidExtent() {
return true;
}
-void RecordStoreV1RepairCursor::invalidate(OperationContext* txn, const RecordId& id) {
+void RecordStoreV1RepairCursor::invalidate(OperationContext* opCtx, const RecordId& id) {
// If we see this record again it probably means it was reinserted rather than an infinite
// loop. If we do loop, we should quickly hit another seen record that hasn't been
// invalidated.
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
index b65782cd27b..d95683a7c42 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
@@ -42,19 +42,19 @@ namespace mongo {
*/
class RecordStoreV1RepairCursor final : public RecordCursor {
public:
- RecordStoreV1RepairCursor(OperationContext* txn, const RecordStoreV1Base* recordStore);
+ RecordStoreV1RepairCursor(OperationContext* opCtx, const RecordStoreV1Base* recordStore);
boost::optional<Record> next() final;
- void invalidate(OperationContext* txn, const RecordId& dl);
+ void invalidate(OperationContext* opCtx, const RecordId& dl);
void save() final {}
bool restore() final {
return true;
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
// Explicitly not supporting fetcherForNext(). The expected use case for this class is a
@@ -74,7 +74,7 @@ private:
bool _advanceToNextValidExtent();
// transactional context for read locks. Not owned by us
- OperationContext* _txn;
+ OperationContext* _opCtx;
// Reference to the owning RecordStore. The store must not be deleted while there are
// active iterators on it.
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
index dfe5860ce33..0b1e2f867c1 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
@@ -70,7 +70,7 @@ static ServerStatusMetricField<Counter64> dFreelist2("storage.freelist.search.bu
static ServerStatusMetricField<Counter64> dFreelist3("storage.freelist.search.scanned",
&freelistIterations);
-SimpleRecordStoreV1::SimpleRecordStoreV1(OperationContext* txn,
+SimpleRecordStoreV1::SimpleRecordStoreV1(OperationContext* opCtx,
StringData ns,
RecordStoreV1MetaData* details,
ExtentManager* em,
@@ -82,7 +82,7 @@ SimpleRecordStoreV1::SimpleRecordStoreV1(OperationContext* txn,
SimpleRecordStoreV1::~SimpleRecordStoreV1() {}
-DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents(OperationContext* txn, int lenToAllocRaw) {
+DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents(OperationContext* opCtx, int lenToAllocRaw) {
// Slowly drain the deletedListLegacyGrabBag by popping one record off and putting it in the
// correct deleted list each time we try to allocate a new record. This ensures we won't
// orphan any data when upgrading from old versions, without needing a long upgrade phase.
@@ -91,8 +91,8 @@ DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents(OperationContext* txn, in
{
const DiskLoc head = _details->deletedListLegacyGrabBag();
if (!head.isNull()) {
- _details->setDeletedListLegacyGrabBag(txn, drec(head)->nextDeleted());
- addDeletedRec(txn, head);
+ _details->setDeletedListLegacyGrabBag(opCtx, drec(head)->nextDeleted());
+ addDeletedRec(opCtx, head);
}
}
@@ -122,8 +122,8 @@ DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents(OperationContext* txn, in
return DiskLoc(); // no space
// Unlink ourself from the deleted list
- _details->setDeletedListEntry(txn, myBucket, dr->nextDeleted());
- *txn->recoveryUnit()->writing(&dr->nextDeleted()) = DiskLoc().setInvalid(); // defensive
+ _details->setDeletedListEntry(opCtx, myBucket, dr->nextDeleted());
+ *opCtx->recoveryUnit()->writing(&dr->nextDeleted()) = DiskLoc().setInvalid(); // defensive
}
invariant(dr->extentOfs() < loc.getOfs());
@@ -132,20 +132,20 @@ DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents(OperationContext* txn, in
// allocation size. Otherwise, just take the whole DeletedRecord.
const int remainingLength = dr->lengthWithHeaders() - lenToAlloc;
if (remainingLength >= bucketSizes[0]) {
- txn->recoveryUnit()->writingInt(dr->lengthWithHeaders()) = lenToAlloc;
+ opCtx->recoveryUnit()->writingInt(dr->lengthWithHeaders()) = lenToAlloc;
const DiskLoc newDelLoc = DiskLoc(loc.a(), loc.getOfs() + lenToAlloc);
- DeletedRecord* newDel = txn->recoveryUnit()->writing(drec(newDelLoc));
+ DeletedRecord* newDel = opCtx->recoveryUnit()->writing(drec(newDelLoc));
newDel->extentOfs() = dr->extentOfs();
newDel->lengthWithHeaders() = remainingLength;
newDel->nextDeleted().Null();
- addDeletedRec(txn, newDelLoc);
+ addDeletedRec(opCtx, newDelLoc);
}
return loc;
}
-StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* txn,
+StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* opCtx,
int lengthWithHeaders,
bool enforceQuota) {
if (lengthWithHeaders > MaxAllowedAllocation) {
@@ -156,18 +156,18 @@ StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* txn,
<< " > 16.5MB");
}
- DiskLoc loc = _allocFromExistingExtents(txn, lengthWithHeaders);
+ DiskLoc loc = _allocFromExistingExtents(opCtx, lengthWithHeaders);
if (!loc.isNull())
return StatusWith<DiskLoc>(loc);
LOG(1) << "allocating new extent";
increaseStorageSize(
- txn,
- _extentManager->followupSize(lengthWithHeaders, _details->lastExtentSize(txn)),
+ opCtx,
+ _extentManager->followupSize(lengthWithHeaders, _details->lastExtentSize(opCtx)),
enforceQuota);
- loc = _allocFromExistingExtents(txn, lengthWithHeaders);
+ loc = _allocFromExistingExtents(opCtx, lengthWithHeaders);
if (!loc.isNull()) {
// got on first try
return StatusWith<DiskLoc>(loc);
@@ -175,17 +175,17 @@ StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* txn,
log() << "warning: alloc() failed after allocating new extent. "
<< "lengthWithHeaders: " << lengthWithHeaders
- << " last extent size:" << _details->lastExtentSize(txn) << "; trying again";
+ << " last extent size:" << _details->lastExtentSize(opCtx) << "; trying again";
- for (int z = 0; z < 10 && lengthWithHeaders > _details->lastExtentSize(txn); z++) {
+ for (int z = 0; z < 10 && lengthWithHeaders > _details->lastExtentSize(opCtx); z++) {
log() << "try #" << z << endl;
increaseStorageSize(
- txn,
- _extentManager->followupSize(lengthWithHeaders, _details->lastExtentSize(txn)),
+ opCtx,
+ _extentManager->followupSize(lengthWithHeaders, _details->lastExtentSize(opCtx)),
enforceQuota);
- loc = _allocFromExistingExtents(txn, lengthWithHeaders);
+ loc = _allocFromExistingExtents(opCtx, lengthWithHeaders);
if (!loc.isNull())
return StatusWith<DiskLoc>(loc);
}
@@ -193,8 +193,8 @@ StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* txn,
return StatusWith<DiskLoc>(ErrorCodes::InternalError, "cannot allocate space");
}
-Status SimpleRecordStoreV1::truncate(OperationContext* txn) {
- const DiskLoc firstExtLoc = _details->firstExtent(txn);
+Status SimpleRecordStoreV1::truncate(OperationContext* opCtx) {
+ const DiskLoc firstExtLoc = _details->firstExtent(opCtx);
if (firstExtLoc.isNull() || !firstExtLoc.isValid()) {
// Already empty
return Status::OK();
@@ -204,53 +204,53 @@ Status SimpleRecordStoreV1::truncate(OperationContext* txn) {
Extent* firstExt = _extentManager->getExtent(firstExtLoc);
if (!firstExt->xnext.isNull()) {
const DiskLoc extNextLoc = firstExt->xnext;
- const DiskLoc oldLastExtLoc = _details->lastExtent(txn);
+ const DiskLoc oldLastExtLoc = _details->lastExtent(opCtx);
Extent* const nextExt = _extentManager->getExtent(extNextLoc);
// Unlink other extents;
- *txn->recoveryUnit()->writing(&nextExt->xprev) = DiskLoc();
- *txn->recoveryUnit()->writing(&firstExt->xnext) = DiskLoc();
- _details->setLastExtent(txn, firstExtLoc);
- _details->setLastExtentSize(txn, firstExt->length);
+ *opCtx->recoveryUnit()->writing(&nextExt->xprev) = DiskLoc();
+ *opCtx->recoveryUnit()->writing(&firstExt->xnext) = DiskLoc();
+ _details->setLastExtent(opCtx, firstExtLoc);
+ _details->setLastExtentSize(opCtx, firstExt->length);
- _extentManager->freeExtents(txn, extNextLoc, oldLastExtLoc);
+ _extentManager->freeExtents(opCtx, extNextLoc, oldLastExtLoc);
}
// Make the first (now only) extent a single large deleted record.
- *txn->recoveryUnit()->writing(&firstExt->firstRecord) = DiskLoc();
- *txn->recoveryUnit()->writing(&firstExt->lastRecord) = DiskLoc();
- _details->orphanDeletedList(txn);
- addDeletedRec(txn, _findFirstSpot(txn, firstExtLoc, firstExt));
+ *opCtx->recoveryUnit()->writing(&firstExt->firstRecord) = DiskLoc();
+ *opCtx->recoveryUnit()->writing(&firstExt->lastRecord) = DiskLoc();
+ _details->orphanDeletedList(opCtx);
+ addDeletedRec(opCtx, _findFirstSpot(opCtx, firstExtLoc, firstExt));
// Make stats reflect that there are now no documents in this record store.
- _details->setStats(txn, 0, 0);
+ _details->setStats(opCtx, 0, 0);
return Status::OK();
}
-void SimpleRecordStoreV1::addDeletedRec(OperationContext* txn, const DiskLoc& dloc) {
+void SimpleRecordStoreV1::addDeletedRec(OperationContext* opCtx, const DiskLoc& dloc) {
DeletedRecord* d = drec(dloc);
int b = bucket(d->lengthWithHeaders());
- *txn->recoveryUnit()->writing(&d->nextDeleted()) = _details->deletedListEntry(b);
- _details->setDeletedListEntry(txn, b, dloc);
+ *opCtx->recoveryUnit()->writing(&d->nextDeleted()) = _details->deletedListEntry(b);
+ _details->setDeletedListEntry(opCtx, b, dloc);
}
-std::unique_ptr<SeekableRecordCursor> SimpleRecordStoreV1::getCursor(OperationContext* txn,
+std::unique_ptr<SeekableRecordCursor> SimpleRecordStoreV1::getCursor(OperationContext* opCtx,
bool forward) const {
- return stdx::make_unique<SimpleRecordStoreV1Iterator>(txn, this, forward);
+ return stdx::make_unique<SimpleRecordStoreV1Iterator>(opCtx, this, forward);
}
vector<std::unique_ptr<RecordCursor>> SimpleRecordStoreV1::getManyCursors(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
vector<std::unique_ptr<RecordCursor>> cursors;
const Extent* ext;
- for (DiskLoc extLoc = details()->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
- ext = _getExtent(txn, extLoc);
+ for (DiskLoc extLoc = details()->firstExtent(opCtx); !extLoc.isNull(); extLoc = ext->xnext) {
+ ext = _getExtent(opCtx, extLoc);
if (ext->firstRecord.isNull())
continue;
- cursors.push_back(
- stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(txn, ext->firstRecord, this));
+ cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
+ opCtx, ext->firstRecord, this));
}
return cursors;
@@ -284,7 +284,7 @@ private:
size_t _allocationSize;
};
-void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
+void SimpleRecordStoreV1::_compactExtent(OperationContext* opCtx,
const DiskLoc extentLoc,
int extentNumber,
RecordStoreCompactAdaptor* adaptor,
@@ -322,12 +322,12 @@ void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
long long nrecords = 0;
DiskLoc nextSourceLoc = sourceExtent->firstRecord;
while (!nextSourceLoc.isNull()) {
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
MmapV1RecordHeader* recOld = recordFor(nextSourceLoc);
RecordData oldData = recOld->toRecordData();
- nextSourceLoc = getNextRecordInExtent(txn, nextSourceLoc);
+ nextSourceLoc = getNextRecordInExtent(opCtx, nextSourceLoc);
if (compactOptions->validateDocuments && !adaptor->isDataValid(oldData)) {
// object is corrupt!
@@ -369,7 +369,7 @@ void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
// start of the compact, this insert will allocate a record in a new extent.
// See the comment in compact() for more details.
CompactDocWriter writer(recOld, rawDataSize, allocationSize);
- StatusWith<RecordId> status = insertRecordWithDocWriter(txn, &writer);
+ StatusWith<RecordId> status = insertRecordWithDocWriter(opCtx, &writer);
uassertStatusOK(status.getStatus());
const MmapV1RecordHeader* newRec =
recordFor(DiskLoc::fromRecordId(status.getValue()));
@@ -384,18 +384,18 @@ void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
// Remove the old record from the linked list of records withing the sourceExtent.
// The old record is not added to the freelist as we will be freeing the whole
// extent at the end.
- *txn->recoveryUnit()->writing(&sourceExtent->firstRecord) = nextSourceLoc;
+ *opCtx->recoveryUnit()->writing(&sourceExtent->firstRecord) = nextSourceLoc;
if (nextSourceLoc.isNull()) {
// Just moved the last record out of the extent. Mark extent as empty.
- *txn->recoveryUnit()->writing(&sourceExtent->lastRecord) = DiskLoc();
+ *opCtx->recoveryUnit()->writing(&sourceExtent->lastRecord) = DiskLoc();
} else {
MmapV1RecordHeader* newFirstRecord = recordFor(nextSourceLoc);
- txn->recoveryUnit()->writingInt(newFirstRecord->prevOfs()) = DiskLoc::NullOfs;
+ opCtx->recoveryUnit()->writingInt(newFirstRecord->prevOfs()) = DiskLoc::NullOfs;
}
// Adjust the stats to reflect the removal of the old record. The insert above
// handled adjusting the stats for the new record.
- _details->incrementStats(txn, -(recOld->netLength()), -1);
+ _details->incrementStats(opCtx, -(recOld->netLength()), -1);
wunit.commit();
}
@@ -405,16 +405,16 @@ void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
invariant(sourceExtent->lastRecord.isNull());
// We are still the first extent, but we must not be the only extent.
- invariant(_details->firstExtent(txn) == extentLoc);
- invariant(_details->lastExtent(txn) != extentLoc);
+ invariant(_details->firstExtent(opCtx) == extentLoc);
+ invariant(_details->lastExtent(opCtx) != extentLoc);
// Remove the newly emptied sourceExtent from the extent linked list and return it to
// the extent manager.
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
const DiskLoc newFirst = sourceExtent->xnext;
- _details->setFirstExtent(txn, newFirst);
- *txn->recoveryUnit()->writing(&_extentManager->getExtent(newFirst)->xprev) = DiskLoc();
- _extentManager->freeExtent(txn, extentLoc);
+ _details->setFirstExtent(opCtx, newFirst);
+ *opCtx->recoveryUnit()->writing(&_extentManager->getExtent(newFirst)->xprev) = DiskLoc();
+ _extentManager->freeExtent(opCtx, extentLoc);
wunit.commit();
{
@@ -428,53 +428,53 @@ void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
}
}
-Status SimpleRecordStoreV1::compact(OperationContext* txn,
+Status SimpleRecordStoreV1::compact(OperationContext* opCtx,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* options,
CompactStats* stats) {
std::vector<DiskLoc> extents;
- for (DiskLoc extLocation = _details->firstExtent(txn); !extLocation.isNull();
+ for (DiskLoc extLocation = _details->firstExtent(opCtx); !extLocation.isNull();
extLocation = _extentManager->getExtent(extLocation)->xnext) {
extents.push_back(extLocation);
}
log() << "compact " << extents.size() << " extents";
{
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
// Orphaning the deleted lists ensures that all inserts go to new extents rather than
// the ones that existed before starting the compact. If we abort the operation before
// completion, any free space in the old extents will be leaked and never reused unless
// the collection is compacted again or dropped. This is considered an acceptable
// failure mode as no data will be lost.
log() << "compact orphan deleted lists" << endl;
- _details->orphanDeletedList(txn);
+ _details->orphanDeletedList(opCtx);
// Start over from scratch with our extent sizing and growth
- _details->setLastExtentSize(txn, 0);
+ _details->setLastExtentSize(opCtx, 0);
// create a new extent so new records go there
- increaseStorageSize(txn, _details->lastExtentSize(txn), true);
+ increaseStorageSize(opCtx, _details->lastExtentSize(opCtx), true);
wunit.commit();
}
- stdx::unique_lock<Client> lk(*txn->getClient());
+ stdx::unique_lock<Client> lk(*opCtx->getClient());
ProgressMeterHolder pm(
- *txn->setMessage_inlock("compact extent", "Extent Compacting Progress", extents.size()));
+ *opCtx->setMessage_inlock("compact extent", "Extent Compacting Progress", extents.size()));
lk.unlock();
// Go through all old extents and move each record to a new set of extents.
int extentNumber = 0;
for (std::vector<DiskLoc>::iterator it = extents.begin(); it != extents.end(); it++) {
- txn->checkForInterrupt();
- invariant(_details->firstExtent(txn) == *it);
+ opCtx->checkForInterrupt();
+ invariant(_details->firstExtent(opCtx) == *it);
// empties and removes the first extent
- _compactExtent(txn, *it, extentNumber++, adaptor, options, stats);
- invariant(_details->firstExtent(txn) != *it);
+ _compactExtent(opCtx, *it, extentNumber++, adaptor, options, stats);
+ invariant(_details->firstExtent(opCtx) != *it);
pm.hit();
}
- invariant(_extentManager->getExtent(_details->firstExtent(txn))->xprev.isNull());
- invariant(_extentManager->getExtent(_details->lastExtent(txn))->xnext.isNull());
+ invariant(_extentManager->getExtent(_details->firstExtent(opCtx))->xprev.isNull());
+ invariant(_extentManager->getExtent(_details->lastExtent(opCtx))->xnext.isNull());
// indexes will do their own progress meter
pm.finished();
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.h b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.h
index 80fe4e8018b..61c04bbf420 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.h
@@ -41,7 +41,7 @@ class SimpleRecordStoreV1Cursor;
// used by index and original collections
class SimpleRecordStoreV1 : public RecordStoreV1Base {
public:
- SimpleRecordStoreV1(OperationContext* txn,
+ SimpleRecordStoreV1(OperationContext* opCtx,
StringData ns,
RecordStoreV1MetaData* details,
ExtentManager* em,
@@ -53,14 +53,14 @@ public:
return "SimpleRecordStoreV1";
}
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final;
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const final;
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* opCtx) const final;
- virtual Status truncate(OperationContext* txn);
+ virtual Status truncate(OperationContext* opCtx);
- virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) {
invariant(!"cappedTruncateAfter not supported");
}
@@ -70,7 +70,7 @@ public:
virtual bool compactsInPlace() const {
return false;
}
- virtual Status compact(OperationContext* txn,
+ virtual Status compact(OperationContext* opCtx,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* options,
CompactStats* stats);
@@ -83,16 +83,16 @@ protected:
return !_details->isUserFlagSet(CollectionOptions::Flag_NoPadding);
}
- virtual StatusWith<DiskLoc> allocRecord(OperationContext* txn,
+ virtual StatusWith<DiskLoc> allocRecord(OperationContext* opCtx,
int lengthWithHeaders,
bool enforceQuota);
- virtual void addDeletedRec(OperationContext* txn, const DiskLoc& dloc);
+ virtual void addDeletedRec(OperationContext* opCtx, const DiskLoc& dloc);
private:
- DiskLoc _allocFromExistingExtents(OperationContext* txn, int lengthWithHeaders);
+ DiskLoc _allocFromExistingExtents(OperationContext* opCtx, int lengthWithHeaders);
- void _compactExtent(OperationContext* txn,
+ void _compactExtent(OperationContext* opCtx,
const DiskLoc diskloc,
int extentNumber,
RecordStoreCompactAdaptor* adaptor,
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp
index 81cd3456a07..414e1016a6b 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp
@@ -39,18 +39,18 @@ namespace mongo {
// Regular / non-capped collection traversal
//
-SimpleRecordStoreV1Iterator::SimpleRecordStoreV1Iterator(OperationContext* txn,
+SimpleRecordStoreV1Iterator::SimpleRecordStoreV1Iterator(OperationContext* opCtx,
const SimpleRecordStoreV1* collection,
bool forward)
- : _txn(txn), _recordStore(collection), _forward(forward) {
+ : _opCtx(opCtx), _recordStore(collection), _forward(forward) {
// Eagerly seek to first Record on creation since it is cheap.
const ExtentManager* em = _recordStore->_extentManager;
- if (_recordStore->details()->firstExtent(txn).isNull()) {
+ if (_recordStore->details()->firstExtent(opCtx).isNull()) {
// nothing in the collection
- verify(_recordStore->details()->lastExtent(txn).isNull());
+ verify(_recordStore->details()->lastExtent(opCtx).isNull());
} else if (_forward) {
// Find a non-empty extent and start with the first record in it.
- Extent* e = em->getExtent(_recordStore->details()->firstExtent(txn));
+ Extent* e = em->getExtent(_recordStore->details()->firstExtent(opCtx));
while (e->firstRecord.isNull() && !e->xnext.isNull()) {
e = em->getExtent(e->xnext);
@@ -62,7 +62,7 @@ SimpleRecordStoreV1Iterator::SimpleRecordStoreV1Iterator(OperationContext* txn,
} else {
// Walk backwards, skipping empty extents, and use the last record in the first
// non-empty extent we see.
- Extent* e = em->getExtent(_recordStore->details()->lastExtent(txn));
+ Extent* e = em->getExtent(_recordStore->details()->lastExtent(opCtx));
// TODO ELABORATE
// Does one of e->lastRecord.isNull(), e.firstRecord.isNull() imply the other?
@@ -81,33 +81,33 @@ boost::optional<Record> SimpleRecordStoreV1Iterator::next() {
return {};
auto toReturn = _curr.toRecordId();
advance();
- return {{toReturn, _recordStore->RecordStore::dataFor(_txn, toReturn)}};
+ return {{toReturn, _recordStore->RecordStore::dataFor(_opCtx, toReturn)}};
}
boost::optional<Record> SimpleRecordStoreV1Iterator::seekExact(const RecordId& id) {
_curr = DiskLoc::fromRecordId(id);
advance();
- return {{id, _recordStore->RecordStore::dataFor(_txn, id)}};
+ return {{id, _recordStore->RecordStore::dataFor(_opCtx, id)}};
}
void SimpleRecordStoreV1Iterator::advance() {
// Move to the next thing.
if (!isEOF()) {
if (_forward) {
- _curr = _recordStore->getNextRecord(_txn, _curr);
+ _curr = _recordStore->getNextRecord(_opCtx, _curr);
} else {
- _curr = _recordStore->getPrevRecord(_txn, _curr);
+ _curr = _recordStore->getPrevRecord(_opCtx, _curr);
}
}
}
-void SimpleRecordStoreV1Iterator::invalidate(OperationContext* txn, const RecordId& dl) {
+void SimpleRecordStoreV1Iterator::invalidate(OperationContext* opCtx, const RecordId& dl) {
// Just move past the thing being deleted.
if (dl == _curr.toRecordId()) {
const DiskLoc origLoc = _curr;
// Undo the advance on rollback, as the deletion that forced it "never happened".
- txn->recoveryUnit()->onRollback([this, origLoc]() { this->_curr = origLoc; });
+ opCtx->recoveryUnit()->onRollback([this, origLoc]() { this->_curr = origLoc; });
advance();
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h
index a480566f9d7..dd54877ee93 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h
@@ -43,7 +43,7 @@ class SimpleRecordStoreV1;
*/
class SimpleRecordStoreV1Iterator final : public SeekableRecordCursor {
public:
- SimpleRecordStoreV1Iterator(OperationContext* txn,
+ SimpleRecordStoreV1Iterator(OperationContext* opCtx,
const SimpleRecordStoreV1* records,
bool forward);
@@ -52,12 +52,12 @@ public:
void save() final;
bool restore() final;
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
}
- void invalidate(OperationContext* txn, const RecordId& dl) final;
+ void invalidate(OperationContext* opCtx, const RecordId& dl) final;
std::unique_ptr<RecordFetcher> fetcherForNext() const final;
std::unique_ptr<RecordFetcher> fetcherForId(const RecordId& id) const final;
@@ -68,7 +68,7 @@ private:
}
// for getNext, not owned
- OperationContext* _txn;
+ OperationContext* _opCtx;
// The result returned on the next call to getNext().
DiskLoc _curr;
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
index 573d4975fbf..e49ac7c1301 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
@@ -115,128 +115,134 @@ private:
/** alloc() quantizes the requested size using quantizeAllocationSpace() rules. */
TEST(SimpleRecordStoreV1, AllocQuantized) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
string myns = "test.AllocQuantized";
- SimpleRecordStoreV1 rs(&txn, myns, md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, myns, md, &em, false);
BSONObj obj = docForRecordSize(300);
- StatusWith<RecordId> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ StatusWith<RecordId> result = rs.insertRecord(&opCtx, obj.objdata(), obj.objsize(), false);
ASSERT(result.isOK());
// The length of the allocated record is quantized.
- ASSERT_EQUALS(512, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+ ASSERT_EQUALS(512,
+ rs.dataFor(&opCtx, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
}
TEST(SimpleRecordStoreV1, AllocNonQuantized) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- md->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
+ md->setUserFlag(&opCtx, CollectionOptions::Flag_NoPadding);
string myns = "test.AllocQuantized";
- SimpleRecordStoreV1 rs(&txn, myns, md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, myns, md, &em, false);
BSONObj obj = docForRecordSize(300);
- StatusWith<RecordId> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ StatusWith<RecordId> result = rs.insertRecord(&opCtx, obj.objdata(), obj.objsize(), false);
ASSERT(result.isOK());
// The length of the allocated record is quantized.
- ASSERT_EQUALS(300, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+ ASSERT_EQUALS(300,
+ rs.dataFor(&opCtx, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
}
TEST(SimpleRecordStoreV1, AllocNonQuantizedStillAligned) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- md->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
+ md->setUserFlag(&opCtx, CollectionOptions::Flag_NoPadding);
string myns = "test.AllocQuantized";
- SimpleRecordStoreV1 rs(&txn, myns, md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, myns, md, &em, false);
BSONObj obj = docForRecordSize(298);
- StatusWith<RecordId> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ StatusWith<RecordId> result = rs.insertRecord(&opCtx, obj.objdata(), obj.objsize(), false);
ASSERT(result.isOK());
// The length of the allocated record is quantized.
- ASSERT_EQUALS(300, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+ ASSERT_EQUALS(300,
+ rs.dataFor(&opCtx, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
}
/** alloc() quantizes the requested size if DocWriter::addPadding() returns true. */
TEST(SimpleRecordStoreV1, AllocQuantizedWithDocWriter) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
string myns = "test.AllocQuantized";
- SimpleRecordStoreV1 rs(&txn, myns, md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, myns, md, &em, false);
BsonDocWriter docWriter(docForRecordSize(300), true);
- StatusWith<RecordId> result = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> result = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT(result.isOK());
// The length of the allocated record is quantized.
- ASSERT_EQUALS(512, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+ ASSERT_EQUALS(512,
+ rs.dataFor(&opCtx, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
}
/**
* alloc() does not quantize records if DocWriter::addPadding() returns false
*/
TEST(SimpleRecordStoreV1, AllocNonQuantizedDocWriter) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
string myns = "test.AllocIndexNamespaceNotQuantized";
- SimpleRecordStoreV1 rs(&txn, myns + "$x", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, myns + "$x", md, &em, false);
BsonDocWriter docWriter(docForRecordSize(300), false);
- StatusWith<RecordId> result = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> result = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT(result.isOK());
// The length of the allocated record is not quantized.
- ASSERT_EQUALS(300, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+ ASSERT_EQUALS(300,
+ rs.dataFor(&opCtx, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
}
/** alloc() aligns record sizes up to 4 bytes even if DocWriter::addPadding returns false. */
TEST(SimpleRecordStoreV1, AllocAlignedDocWriter) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
string myns = "test.AllocIndexNamespaceNotQuantized";
- SimpleRecordStoreV1 rs(&txn, myns + "$x", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, myns + "$x", md, &em, false);
BsonDocWriter docWriter(docForRecordSize(298), false);
- StatusWith<RecordId> result = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> result = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT(result.isOK());
- ASSERT_EQUALS(300, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+ ASSERT_EQUALS(300,
+ rs.dataFor(&opCtx, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
}
/**
* alloc() with quantized size doesn't split if enough room left over.
*/
TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithoutSplit) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{DiskLoc(0, 1000), 512 + 31}, {}};
- initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, NULL, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(300), true);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 512 + 31}, {}};
LocAndSize drecs[] = {{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
}
}
@@ -244,24 +250,24 @@ TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithoutSplit) {
* alloc() with quantized size splits if enough room left over.
*/
TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithSplit) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{DiskLoc(0, 1000), 512 + 32}, {}};
- initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, NULL, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(300), true);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 512}, {}};
LocAndSize drecs[] = {{DiskLoc(0, 1512), 32}, {}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
}
}
@@ -269,24 +275,24 @@ TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithSplit) {
* alloc() with non quantized size doesn't split if enough room left over.
*/
TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithoutSplit) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{DiskLoc(0, 1000), 331}, {}};
- initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, NULL, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(300), false);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 331}, {}};
LocAndSize drecs[] = {{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
}
}
@@ -294,24 +300,24 @@ TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithoutSplit) {
* alloc() with non quantized size splits if enough room left over.
*/
TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithSplit) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{DiskLoc(0, 1000), 332}, {}};
- initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, NULL, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(300), false);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 300}, {}};
LocAndSize drecs[] = {{DiskLoc(0, 1300), 32}, {}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
}
}
@@ -319,27 +325,27 @@ TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithSplit) {
* alloc() will use from the legacy grab bag if it can.
*/
TEST(SimpleRecordStoreV1, GrabBagIsUsed) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{}};
LocAndSize grabBag[] = {
{DiskLoc(0, 1000), 4 * 1024 * 1024}, {DiskLoc(1, 1000), 4 * 1024 * 1024}, {}};
- initializeV1RS(&txn, NULL, drecs, grabBag, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, grabBag, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(256), false);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 256}, {}};
LocAndSize drecs[] = {{DiskLoc(0, 1256), 4 * 1024 * 1024 - 256}, {}};
LocAndSize grabBag[] = {{DiskLoc(1, 1000), 4 * 1024 * 1024}, {}};
- assertStateV1RS(&txn, recs, drecs, grabBag, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, grabBag, &em, md);
}
}
@@ -347,27 +353,27 @@ TEST(SimpleRecordStoreV1, GrabBagIsUsed) {
* alloc() will pull from the legacy grab bag even if it isn't needed.
*/
TEST(SimpleRecordStoreV1, GrabBagIsPoppedEvenIfUnneeded) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
LocAndSize grabBag[] = {
{DiskLoc(1, 1000), 4 * 1024 * 1024}, {DiskLoc(2, 1000), 4 * 1024 * 1024}, {}};
- initializeV1RS(&txn, NULL, drecs, grabBag, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, grabBag, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(1000), false);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 1000}, {}};
LocAndSize drecs[] = {{DiskLoc(1, 1000), 4 * 1024 * 1024}, {}};
LocAndSize grabBag[] = {{DiskLoc(2, 1000), 4 * 1024 * 1024}, {}};
- assertStateV1RS(&txn, recs, drecs, grabBag, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, grabBag, &em, md);
}
}
@@ -375,54 +381,54 @@ TEST(SimpleRecordStoreV1, GrabBagIsPoppedEvenIfUnneeded) {
* alloc() will pull from the legacy grab bag even if it can't be used
*/
TEST(SimpleRecordStoreV1, GrabBagIsPoppedEvenIfUnusable) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize drecs[] = {{DiskLoc(0, 1000), 8 * 1024 * 1024}, {}};
LocAndSize grabBag[] = {
{DiskLoc(1, 1000), 4 * 1024 * 1024}, {DiskLoc(2, 1000), 4 * 1024 * 1024}, {}};
- initializeV1RS(&txn, NULL, drecs, grabBag, &em, md);
+ initializeV1RS(&opCtx, NULL, drecs, grabBag, &em, md);
}
BsonDocWriter docWriter(docForRecordSize(8 * 1024 * 1024), false);
- StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&txn, &docWriter);
+ StatusWith<RecordId> actualLocation = rs.insertRecordWithDocWriter(&opCtx, &docWriter);
ASSERT_OK(actualLocation.getStatus());
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 8 * 1024 * 1024}, {}};
LocAndSize drecs[] = {{DiskLoc(1, 1000), 4 * 1024 * 1024}, {}};
LocAndSize grabBag[] = {{DiskLoc(2, 1000), 4 * 1024 * 1024}, {}};
- assertStateV1RS(&txn, recs, drecs, grabBag, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, grabBag, &em, md);
}
}
// -----------------
TEST(SimpleRecordStoreV1, FullSimple1) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
ASSERT_EQUALS(0, md->numRecords());
- StatusWith<RecordId> result = rs.insertRecord(&txn, "abc", 4, 1000);
+ StatusWith<RecordId> result = rs.insertRecord(&opCtx, "abc", 4, 1000);
ASSERT_TRUE(result.isOK());
ASSERT_EQUALS(1, md->numRecords());
- RecordData recordData = rs.dataFor(&txn, result.getValue());
+ RecordData recordData = rs.dataFor(&opCtx, result.getValue());
ASSERT_EQUALS(string("abc"), string(recordData.data()));
}
// -----------------
TEST(SimpleRecordStoreV1, Truncate) {
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+ SimpleRecordStoreV1 rs(&opCtx, "test.foo", md, &em, false);
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 100},
@@ -433,12 +439,12 @@ TEST(SimpleRecordStoreV1, Truncate) {
LocAndSize drecs[] = {
{DiskLoc(0, 1200), 100}, {DiskLoc(2, 1000), 100}, {DiskLoc(1, 1000), 1000}, {}};
- initializeV1RS(&txn, recs, drecs, NULL, &em, md);
+ initializeV1RS(&opCtx, recs, drecs, NULL, &em, md);
ASSERT_EQUALS(em.getExtent(DiskLoc(0, 0))->length, em.minSize());
}
- rs.truncate(&txn);
+ rs.truncate(&opCtx);
{
LocAndSize recs[] = {{}};
@@ -446,7 +452,7 @@ TEST(SimpleRecordStoreV1, Truncate) {
// One extent filled with a single deleted record.
{DiskLoc(0, Extent::HeaderSize()), em.minSize() - Extent::HeaderSize()},
{}};
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ assertStateV1RS(&opCtx, recs, drecs, NULL, &em, md);
}
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
index 6f4d3993cbe..3872e4bccd0 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
@@ -68,7 +68,7 @@ const DiskLoc& DummyRecordStoreV1MetaData::capExtent() const {
return _capExtent;
}
-void DummyRecordStoreV1MetaData::setCapExtent(OperationContext* txn, const DiskLoc& loc) {
+void DummyRecordStoreV1MetaData::setCapExtent(OperationContext* opCtx, const DiskLoc& loc) {
_capExtent = loc;
}
@@ -76,7 +76,7 @@ const DiskLoc& DummyRecordStoreV1MetaData::capFirstNewRecord() const {
return _capFirstNewRecord;
}
-void DummyRecordStoreV1MetaData::setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc) {
+void DummyRecordStoreV1MetaData::setCapFirstNewRecord(OperationContext* opCtx, const DiskLoc& loc) {
_capFirstNewRecord = loc;
}
@@ -88,14 +88,14 @@ long long DummyRecordStoreV1MetaData::numRecords() const {
return _numRecords;
}
-void DummyRecordStoreV1MetaData::incrementStats(OperationContext* txn,
+void DummyRecordStoreV1MetaData::incrementStats(OperationContext* opCtx,
long long dataSizeIncrement,
long long numRecordsIncrement) {
_dataSize += dataSizeIncrement;
_numRecords += numRecordsIncrement;
}
-void DummyRecordStoreV1MetaData::setStats(OperationContext* txn,
+void DummyRecordStoreV1MetaData::setStats(OperationContext* opCtx,
long long dataSize,
long long numRecords) {
_dataSize = dataSize;
@@ -113,7 +113,7 @@ DiskLoc DummyRecordStoreV1MetaData::deletedListEntry(int bucket) const {
return _deletedLists[bucket];
}
-void DummyRecordStoreV1MetaData::setDeletedListEntry(OperationContext* txn,
+void DummyRecordStoreV1MetaData::setDeletedListEntry(OperationContext* opCtx,
int bucket,
const DiskLoc& loc) {
invariant(bucket >= 0);
@@ -127,29 +127,29 @@ DiskLoc DummyRecordStoreV1MetaData::deletedListLegacyGrabBag() const {
return _deletedListLegacyGrabBag;
}
-void DummyRecordStoreV1MetaData::setDeletedListLegacyGrabBag(OperationContext* txn,
+void DummyRecordStoreV1MetaData::setDeletedListLegacyGrabBag(OperationContext* opCtx,
const DiskLoc& loc) {
_deletedListLegacyGrabBag = loc;
}
-void DummyRecordStoreV1MetaData::orphanDeletedList(OperationContext* txn) {
+void DummyRecordStoreV1MetaData::orphanDeletedList(OperationContext* opCtx) {
// They will be recreated on demand.
_deletedLists.clear();
}
-const DiskLoc& DummyRecordStoreV1MetaData::firstExtent(OperationContext* txn) const {
+const DiskLoc& DummyRecordStoreV1MetaData::firstExtent(OperationContext* opCtx) const {
return _firstExtent;
}
-void DummyRecordStoreV1MetaData::setFirstExtent(OperationContext* txn, const DiskLoc& loc) {
+void DummyRecordStoreV1MetaData::setFirstExtent(OperationContext* opCtx, const DiskLoc& loc) {
_firstExtent = loc;
}
-const DiskLoc& DummyRecordStoreV1MetaData::lastExtent(OperationContext* txn) const {
+const DiskLoc& DummyRecordStoreV1MetaData::lastExtent(OperationContext* opCtx) const {
return _lastExtent;
}
-void DummyRecordStoreV1MetaData::setLastExtent(OperationContext* txn, const DiskLoc& loc) {
+void DummyRecordStoreV1MetaData::setLastExtent(OperationContext* opCtx, const DiskLoc& loc) {
_lastExtent = loc;
}
@@ -161,21 +161,21 @@ bool DummyRecordStoreV1MetaData::isUserFlagSet(int flag) const {
return _userFlags & flag;
}
-bool DummyRecordStoreV1MetaData::setUserFlag(OperationContext* txn, int flag) {
+bool DummyRecordStoreV1MetaData::setUserFlag(OperationContext* opCtx, int flag) {
if ((_userFlags & flag) == flag)
return false;
_userFlags |= flag;
return true;
}
-bool DummyRecordStoreV1MetaData::clearUserFlag(OperationContext* txn, int flag) {
+bool DummyRecordStoreV1MetaData::clearUserFlag(OperationContext* opCtx, int flag) {
if ((_userFlags & flag) == 0)
return false;
_userFlags &= ~flag;
return true;
}
-bool DummyRecordStoreV1MetaData::replaceUserFlags(OperationContext* txn, int flags) {
+bool DummyRecordStoreV1MetaData::replaceUserFlags(OperationContext* opCtx, int flags) {
if (_userFlags == flags)
return false;
_userFlags = flags;
@@ -183,11 +183,11 @@ bool DummyRecordStoreV1MetaData::replaceUserFlags(OperationContext* txn, int fla
}
-int DummyRecordStoreV1MetaData::lastExtentSize(OperationContext* txn) const {
+int DummyRecordStoreV1MetaData::lastExtentSize(OperationContext* opCtx) const {
return _lastExtentSize;
}
-void DummyRecordStoreV1MetaData::setLastExtentSize(OperationContext* txn, int newMax) {
+void DummyRecordStoreV1MetaData::setLastExtentSize(OperationContext* opCtx, int newMax) {
_lastExtentSize = newMax;
}
@@ -204,9 +204,9 @@ DummyExtentManager::~DummyExtentManager() {
}
}
-void DummyExtentManager::close(OperationContext* txn) {}
+void DummyExtentManager::close(OperationContext* opCtx) {}
-Status DummyExtentManager::init(OperationContext* txn) {
+Status DummyExtentManager::init(OperationContext* opCtx) {
return Status::OK();
}
@@ -219,7 +219,7 @@ long long DummyExtentManager::fileSize() const {
return -1;
}
-DiskLoc DummyExtentManager::allocateExtent(OperationContext* txn,
+DiskLoc DummyExtentManager::allocateExtent(OperationContext* opCtx,
bool capped,
int size,
bool enforceQuota) {
@@ -244,14 +244,14 @@ DiskLoc DummyExtentManager::allocateExtent(OperationContext* txn,
return loc;
}
-void DummyExtentManager::freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt) {
+void DummyExtentManager::freeExtents(OperationContext* opCtx, DiskLoc firstExt, DiskLoc lastExt) {
// XXX
}
-void DummyExtentManager::freeExtent(OperationContext* txn, DiskLoc extent) {
+void DummyExtentManager::freeExtent(OperationContext* opCtx, DiskLoc extent) {
// XXX
}
-void DummyExtentManager::freeListStats(OperationContext* txn,
+void DummyExtentManager::freeListStats(OperationContext* opCtx,
int* numExtents,
int64_t* totalFreeSizeBytes) const {
invariant(false);
@@ -297,11 +297,11 @@ DummyExtentManager::CacheHint* DummyExtentManager::cacheHint(const DiskLoc& exte
return new CacheHint();
}
-DataFileVersion DummyExtentManager::getFileFormat(OperationContext* txn) const {
+DataFileVersion DummyExtentManager::getFileFormat(OperationContext* opCtx) const {
return DataFileVersion::defaultForNewFiles();
}
-void DummyExtentManager::setFileFormat(OperationContext* txn, DataFileVersion newVersion) {}
+void DummyExtentManager::setFileFormat(OperationContext* opCtx, DataFileVersion newVersion) {}
const DataFile* DummyExtentManager::getOpenFile(int n) const {
return nullptr;
@@ -324,9 +324,11 @@ void accumulateExtentSizeRequirements(const LocAndSize* las, std::map<int, size_
}
}
-void printRecList(OperationContext* txn, const ExtentManager* em, const RecordStoreV1MetaData* md) {
+void printRecList(OperationContext* opCtx,
+ const ExtentManager* em,
+ const RecordStoreV1MetaData* md) {
log() << " *** BEGIN ACTUAL RECORD LIST *** ";
- DiskLoc extLoc = md->firstExtent(txn);
+ DiskLoc extLoc = md->firstExtent(opCtx);
std::set<DiskLoc> seenLocs;
while (!extLoc.isNull()) {
Extent* ext = em->getExtent(extLoc, true);
@@ -380,7 +382,7 @@ void printDRecList(const ExtentManager* em, const RecordStoreV1MetaData* md) {
}
}
-void initializeV1RS(OperationContext* txn,
+void initializeV1RS(OperationContext* opCtx,
const LocAndSize* records,
const LocAndSize* drecs,
const LocAndSize* legacyGrabBag,
@@ -390,7 +392,7 @@ void initializeV1RS(OperationContext* txn,
// Need to start with a blank slate
invariant(em->numFiles() == 0);
- invariant(md->firstExtent(txn).isNull());
+ invariant(md->firstExtent(opCtx).isNull());
// pre-allocate extents (even extents that aren't part of this RS)
{
@@ -404,7 +406,7 @@ void initializeV1RS(OperationContext* txn,
const int maxExtent = extentSizes.rbegin()->first;
for (int i = 0; i <= maxExtent; i++) {
const size_t size = extentSizes.count(i) ? extentSizes[i] : 0;
- const DiskLoc loc = em->allocateExtent(txn, md->isCapped(), size, 0);
+ const DiskLoc loc = em->allocateExtent(opCtx, md->isCapped(), size, 0);
// This function and assertState depend on these details of DummyExtentManager
invariant(loc.a() == i);
@@ -412,8 +414,8 @@ void initializeV1RS(OperationContext* txn,
}
// link together extents that should be part of this RS
- md->setFirstExtent(txn, DiskLoc(extentSizes.begin()->first, 0));
- md->setLastExtent(txn, DiskLoc(extentSizes.rbegin()->first, 0));
+ md->setFirstExtent(opCtx, DiskLoc(extentSizes.begin()->first, 0));
+ md->setLastExtent(opCtx, DiskLoc(extentSizes.rbegin()->first, 0));
for (ExtentSizes::iterator it = extentSizes.begin(); boost::next(it) != extentSizes.end();
/* ++it */) {
const int a = it->first;
@@ -425,12 +427,12 @@ void initializeV1RS(OperationContext* txn,
// This signals "done allocating new extents".
if (md->isCapped())
- md->setDeletedListEntry(txn, 1, DiskLoc());
+ md->setDeletedListEntry(opCtx, 1, DiskLoc());
}
if (records && !records[0].loc.isNull()) {
int recIdx = 0;
- DiskLoc extLoc = md->firstExtent(txn);
+ DiskLoc extLoc = md->firstExtent(opCtx);
while (!extLoc.isNull()) {
Extent* ext = em->getExtent(extLoc);
int prevOfs = DiskLoc::NullOfs;
@@ -440,7 +442,7 @@ void initializeV1RS(OperationContext* txn,
;
invariant(size >= MmapV1RecordHeader::HeaderSize);
- md->incrementStats(txn, size - MmapV1RecordHeader::HeaderSize, 1);
+ md->incrementStats(opCtx, size - MmapV1RecordHeader::HeaderSize, 1);
if (ext->firstRecord.isNull())
ext->firstRecord = loc;
@@ -480,7 +482,7 @@ void initializeV1RS(OperationContext* txn,
if (md->isCapped()) {
// All drecs form a single list in bucket 0
if (prevNextPtr == NULL) {
- md->setDeletedListEntry(txn, 0, loc);
+ md->setDeletedListEntry(opCtx, 0, loc);
} else {
*prevNextPtr = loc;
}
@@ -488,11 +490,11 @@ void initializeV1RS(OperationContext* txn,
if (loc.a() < md->capExtent().a() &&
drecs[drecIdx + 1].loc.a() == md->capExtent().a()) {
// Bucket 1 is known as cappedLastDelRecLastExtent
- md->setDeletedListEntry(txn, 1, loc);
+ md->setDeletedListEntry(opCtx, 1, loc);
}
} else if (bucket != lastBucket) {
invariant(bucket > lastBucket); // if this fails, drecs weren't sorted by bucket
- md->setDeletedListEntry(txn, bucket, loc);
+ md->setDeletedListEntry(opCtx, bucket, loc);
lastBucket = bucket;
} else {
*prevNextPtr = loc;
@@ -519,7 +521,7 @@ void initializeV1RS(OperationContext* txn,
invariant(size >= MmapV1RecordHeader::HeaderSize);
if (grabBagIdx == 0) {
- md->setDeletedListLegacyGrabBag(txn, loc);
+ md->setDeletedListLegacyGrabBag(opCtx, loc);
} else {
*prevNextPtr = loc;
}
@@ -535,10 +537,10 @@ void initializeV1RS(OperationContext* txn,
}
// Make sure we set everything up as requested.
- assertStateV1RS(txn, records, drecs, legacyGrabBag, em, md);
+ assertStateV1RS(opCtx, records, drecs, legacyGrabBag, em, md);
}
-void assertStateV1RS(OperationContext* txn,
+void assertStateV1RS(OperationContext* opCtx,
const LocAndSize* records,
const LocAndSize* drecs,
const LocAndSize* legacyGrabBag,
@@ -553,7 +555,7 @@ void assertStateV1RS(OperationContext* txn,
int recIdx = 0;
- DiskLoc extLoc = md->firstExtent(txn);
+ DiskLoc extLoc = md->firstExtent(opCtx);
while (!extLoc.isNull()) { // for each Extent
Extent* ext = em->getExtent(extLoc, true);
int expectedPrevOfs = DiskLoc::NullOfs;
@@ -579,7 +581,7 @@ void assertStateV1RS(OperationContext* txn,
}
if (ext->xnext.isNull()) {
- ASSERT_EQUALS(md->lastExtent(txn), extLoc);
+ ASSERT_EQUALS(md->lastExtent(opCtx), extLoc);
}
extLoc = ext->xnext;
@@ -602,7 +604,7 @@ void assertStateV1RS(OperationContext* txn,
// the first drec in the capExtent. If the capExtent is the first Extent,
// it should be Null.
- if (md->capExtent() == md->firstExtent(txn)) {
+ if (md->capExtent() == md->firstExtent(opCtx)) {
ASSERT_EQUALS(actualLoc, DiskLoc());
} else {
ASSERT_NOT_EQUALS(actualLoc.a(), md->capExtent().a());
@@ -659,7 +661,7 @@ void assertStateV1RS(OperationContext* txn,
}
} catch (...) {
// If a test fails, provide extra info to make debugging easier
- printRecList(txn, em, md);
+ printRecList(opCtx, em, md);
printDRecList(em, md);
throw;
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
index eac135dd24a..c9af1e5cc36 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
@@ -44,33 +44,33 @@ public:
virtual ~DummyRecordStoreV1MetaData() {}
virtual const DiskLoc& capExtent() const;
- virtual void setCapExtent(OperationContext* txn, const DiskLoc& loc);
+ virtual void setCapExtent(OperationContext* opCtx, const DiskLoc& loc);
virtual const DiskLoc& capFirstNewRecord() const;
- virtual void setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc);
+ virtual void setCapFirstNewRecord(OperationContext* opCtx, const DiskLoc& loc);
virtual long long dataSize() const;
virtual long long numRecords() const;
- virtual void incrementStats(OperationContext* txn,
+ virtual void incrementStats(OperationContext* opCtx,
long long dataSizeIncrement,
long long numRecordsIncrement);
- virtual void setStats(OperationContext* txn, long long dataSize, long long numRecords);
+ virtual void setStats(OperationContext* opCtx, long long dataSize, long long numRecords);
virtual DiskLoc deletedListEntry(int bucket) const;
- virtual void setDeletedListEntry(OperationContext* txn, int bucket, const DiskLoc& loc);
+ virtual void setDeletedListEntry(OperationContext* opCtx, int bucket, const DiskLoc& loc);
virtual DiskLoc deletedListLegacyGrabBag() const;
- virtual void setDeletedListLegacyGrabBag(OperationContext* txn, const DiskLoc& loc);
+ virtual void setDeletedListLegacyGrabBag(OperationContext* opCtx, const DiskLoc& loc);
- virtual void orphanDeletedList(OperationContext* txn);
+ virtual void orphanDeletedList(OperationContext* opCtx);
- virtual const DiskLoc& firstExtent(OperationContext* txn) const;
- virtual void setFirstExtent(OperationContext* txn, const DiskLoc& loc);
+ virtual const DiskLoc& firstExtent(OperationContext* opCtx) const;
+ virtual void setFirstExtent(OperationContext* opCtx, const DiskLoc& loc);
- virtual const DiskLoc& lastExtent(OperationContext* txn) const;
- virtual void setLastExtent(OperationContext* txn, const DiskLoc& loc);
+ virtual const DiskLoc& lastExtent(OperationContext* opCtx) const;
+ virtual void setLastExtent(OperationContext* opCtx, const DiskLoc& loc);
virtual bool isCapped() const;
@@ -78,13 +78,13 @@ public:
virtual int userFlags() const {
return _userFlags;
}
- virtual bool setUserFlag(OperationContext* txn, int flag);
- virtual bool clearUserFlag(OperationContext* txn, int flag);
- virtual bool replaceUserFlags(OperationContext* txn, int flags);
+ virtual bool setUserFlag(OperationContext* opCtx, int flag);
+ virtual bool clearUserFlag(OperationContext* opCtx, int flag);
+ virtual bool replaceUserFlags(OperationContext* opCtx, int flags);
- virtual int lastExtentSize(OperationContext* txn) const;
- virtual void setLastExtentSize(OperationContext* txn, int newMax);
+ virtual int lastExtentSize(OperationContext* opCtx) const;
+ virtual void setLastExtentSize(OperationContext* opCtx, int newMax);
virtual long long maxCappedDocs() const;
@@ -113,20 +113,23 @@ class DummyExtentManager : public ExtentManager {
public:
virtual ~DummyExtentManager();
- virtual void close(OperationContext* txn);
+ virtual void close(OperationContext* opCtx);
- virtual Status init(OperationContext* txn);
+ virtual Status init(OperationContext* opCtx);
virtual int numFiles() const;
virtual long long fileSize() const;
- virtual DiskLoc allocateExtent(OperationContext* txn, bool capped, int size, bool enforceQuota);
+ virtual DiskLoc allocateExtent(OperationContext* opCtx,
+ bool capped,
+ int size,
+ bool enforceQuota);
- virtual void freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt);
+ virtual void freeExtents(OperationContext* opCtx, DiskLoc firstExt, DiskLoc lastExt);
- virtual void freeExtent(OperationContext* txn, DiskLoc extent);
+ virtual void freeExtent(OperationContext* opCtx, DiskLoc extent);
- virtual void freeListStats(OperationContext* txn,
+ virtual void freeListStats(OperationContext* opCtx,
int* numExtents,
int64_t* totalFreeSizeBytes) const;
@@ -144,9 +147,9 @@ public:
virtual CacheHint* cacheHint(const DiskLoc& extentLoc, const HintType& hint);
- DataFileVersion getFileFormat(OperationContext* txn) const final;
+ DataFileVersion getFileFormat(OperationContext* opCtx) const final;
- virtual void setFileFormat(OperationContext* txn, DataFileVersion newVersion) final;
+ virtual void setFileFormat(OperationContext* opCtx, DataFileVersion newVersion) final;
const DataFile* getOpenFile(int n) const final;
@@ -184,7 +187,7 @@ struct LocAndSize {
*
* ExtentManager and MetaData must both be empty.
*/
-void initializeV1RS(OperationContext* txn,
+void initializeV1RS(OperationContext* opCtx,
const LocAndSize* records,
const LocAndSize* drecs,
const LocAndSize* legacyGrabBag,
@@ -198,7 +201,7 @@ void initializeV1RS(OperationContext* txn,
* List of LocAndSize are terminated by a Null DiskLoc. Passing a NULL pointer means don't check
* that list.
*/
-void assertStateV1RS(OperationContext* txn,
+void assertStateV1RS(OperationContext* opCtx,
const LocAndSize* records,
const LocAndSize* drecs,
const LocAndSize* legacyGrabBag,
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index ea76462eaad..c321cd5513c 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -236,11 +236,11 @@ void _applyOpToDataFiles(const string& database,
class RepairFileDeleter {
public:
- RepairFileDeleter(OperationContext* txn,
+ RepairFileDeleter(OperationContext* opCtx,
const string& dbName,
const string& pathString,
const Path& path)
- : _txn(txn), _dbName(dbName), _pathString(pathString), _path(path), _success(false) {}
+ : _opCtx(opCtx), _dbName(dbName), _pathString(pathString), _path(path), _success(false) {}
~RepairFileDeleter() {
if (_success)
@@ -250,10 +250,10 @@ public:
<< "db: " << _dbName << " path: " << _pathString;
try {
- getDur().syncDataAndTruncateJournal(_txn);
+ getDur().syncDataAndTruncateJournal(_opCtx);
// need both in case journaling is disabled
- MongoFile::flushAll(_txn, true);
+ MongoFile::flushAll(_opCtx, true);
MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::remove_all(_path));
} catch (DBException& e) {
@@ -268,21 +268,21 @@ public:
}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
string _dbName;
string _pathString;
Path _path;
bool _success;
};
-Status MMAPV1Engine::repairDatabase(OperationContext* txn,
+Status MMAPV1Engine::repairDatabase(OperationContext* opCtx,
const std::string& dbName,
bool preserveClonedFilesOnFailure,
bool backupOriginalFiles) {
unique_ptr<RepairFileDeleter> repairFileDeleter;
// Must be done before and after repair
- getDur().syncDataAndTruncateJournal(txn);
+ getDur().syncDataAndTruncateJournal(opCtx);
intmax_t totalSize = dbSize(dbName);
intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath);
@@ -296,7 +296,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
<< " (bytes)");
}
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
Path reservedPath = uniqueReservedPath(
(preserveClonedFilesOnFailure || backupOriginalFiles) ? "backup" : "_tmp");
@@ -307,10 +307,10 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
if (!preserveClonedFilesOnFailure)
repairFileDeleter.reset(
- new RepairFileDeleter(txn, dbName, reservedPathString, reservedPath));
+ new RepairFileDeleter(opCtx, dbName, reservedPathString, reservedPath));
{
- Database* originalDatabase = dbHolder().openDb(txn, dbName);
+ Database* originalDatabase = dbHolder().openDb(opCtx, dbName);
if (originalDatabase == NULL) {
return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair");
}
@@ -319,30 +319,30 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
unique_ptr<Database> tempDatabase;
// Must call this before MMAPV1DatabaseCatalogEntry's destructor closes the DB files
- ON_BLOCK_EXIT([&dbEntry, &txn] {
- getDur().syncDataAndTruncateJournal(txn);
- dbEntry->close(txn);
+ ON_BLOCK_EXIT([&dbEntry, &opCtx] {
+ getDur().syncDataAndTruncateJournal(opCtx);
+ dbEntry->close(opCtx);
});
{
dbEntry.reset(new MMAPV1DatabaseCatalogEntry(
- txn,
+ opCtx,
dbName,
reservedPathString,
storageGlobalParams.directoryperdb,
true,
_extentManagerFactory->create(
dbName, reservedPathString, storageGlobalParams.directoryperdb)));
- tempDatabase.reset(new Database(txn, dbName, dbEntry.get()));
+ tempDatabase.reset(new Database(opCtx, dbName, dbEntry.get()));
}
map<string, CollectionOptions> namespacesToCopy;
{
string ns = dbName + ".system.namespaces";
- OldClientContext ctx(txn, ns);
+ OldClientContext ctx(opCtx, ns);
Collection* coll = originalDatabase->getCollection(ns);
if (coll) {
- auto cursor = coll->getCursor(txn);
+ auto cursor = coll->getCursor(opCtx);
while (auto record = cursor->next()) {
BSONObj obj = record->data.releaseToBson();
@@ -378,23 +378,23 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
Collection* tempCollection = NULL;
{
- WriteUnitOfWork wunit(txn);
- tempCollection = tempDatabase->createCollection(txn, ns, options, false);
+ WriteUnitOfWork wunit(opCtx);
+ tempCollection = tempDatabase->createCollection(opCtx, ns, options, false);
wunit.commit();
}
- OldClientContext readContext(txn, ns, originalDatabase);
+ OldClientContext readContext(opCtx, ns, originalDatabase);
Collection* originalCollection = originalDatabase->getCollection(ns);
invariant(originalCollection);
// data
// TODO SERVER-14812 add a mode that drops duplicates rather than failing
- MultiIndexBlock indexer(txn, tempCollection);
+ MultiIndexBlock indexer(opCtx, tempCollection);
{
vector<BSONObj> indexes;
IndexCatalog::IndexIterator ii =
- originalCollection->getIndexCatalog()->getIndexIterator(txn, false);
+ originalCollection->getIndexCatalog()->getIndexIterator(opCtx, false);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
indexes.push_back(desc->infoObj());
@@ -407,17 +407,17 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
}
std::vector<MultiIndexBlock*> indexers{&indexer};
- auto cursor = originalCollection->getCursor(txn);
+ auto cursor = originalCollection->getCursor(opCtx);
while (auto record = cursor->next()) {
BSONObj doc = record->data.releaseToBson();
- WriteUnitOfWork wunit(txn);
- Status status = tempCollection->insertDocument(txn, doc, indexers, false);
+ WriteUnitOfWork wunit(opCtx);
+ Status status = tempCollection->insertDocument(opCtx, doc, indexers, false);
if (!status.isOK())
return status;
wunit.commit();
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
Status status = indexer.doneInserting();
@@ -425,18 +425,18 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
return status;
{
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(opCtx);
indexer.commit();
wunit.commit();
}
}
- getDur().syncDataAndTruncateJournal(txn);
+ getDur().syncDataAndTruncateJournal(opCtx);
// need both in case journaling is disabled
- MongoFile::flushAll(txn, true);
+ MongoFile::flushAll(opCtx, true);
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
}
// at this point if we abort, we don't want to delete new files
@@ -446,7 +446,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
repairFileDeleter->success();
// Close the database so we can rename/delete the original data files
- dbHolder().close(txn, dbName);
+ dbHolder().close(opCtx, dbName);
if (backupOriginalFiles) {
_renameForBackup(dbName, reservedPath);
@@ -472,7 +472,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
}
// Reopen the database so it's discoverable
- dbHolder().openDb(txn, dbName);
+ dbHolder().openDb(opCtx, dbName);
return Status::OK();
}
diff --git a/src/mongo/db/storage/record_fetcher.h b/src/mongo/db/storage/record_fetcher.h
index e133e28bdf0..0c8d5f18080 100644
--- a/src/mongo/db/storage/record_fetcher.h
+++ b/src/mongo/db/storage/record_fetcher.h
@@ -44,7 +44,7 @@ public:
/**
* Performs any setup which is needed prior to yielding locks.
*/
- virtual void setup(OperationContext* txn) = 0;
+ virtual void setup(OperationContext* opCtx) = 0;
/**
* Called after locks are yielded in order to bring data into memory.
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index f17e07bfa59..c321938af06 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -78,7 +78,8 @@ protected:
class UpdateNotifier {
public:
virtual ~UpdateNotifier() {}
- virtual Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) = 0;
+ virtual Status recordStoreGoingToUpdateInPlace(OperationContext* opCtx,
+ const RecordId& loc) = 0;
};
/**
@@ -199,12 +200,12 @@ public:
/**
* Inform the cursor that this id is being invalidated. Must be called between save and restore.
- * The txn is that of the operation causing the invalidation, not the txn using the cursor.
+ * The opCtx is that of the operation causing the invalidation, not the opCtx using the cursor.
*
* WARNING: Storage engines other than MMAPv1 should use the default implementation,
* and not depend on this being called.
*/
- virtual void invalidate(OperationContext* txn, const RecordId& id) {}
+ virtual void invalidate(OperationContext* opCtx, const RecordId& id) {}
//
// RecordFetchers
@@ -299,13 +300,13 @@ public:
* The dataSize is an approximation of the sum of the sizes (in bytes) of the
* documents or entries in the recordStore.
*/
- virtual long long dataSize(OperationContext* txn) const = 0;
+ virtual long long dataSize(OperationContext* opCtx) const = 0;
/**
* Total number of record in the RecordStore. You may need to cache it, so this call
* takes constant time, as it is called often.
*/
- virtual long long numRecords(OperationContext* txn) const = 0;
+ virtual long long numRecords(OperationContext* opCtx) const = 0;
virtual bool isCapped() const = 0;
@@ -318,7 +319,7 @@ public:
* @param level - optional, level of debug info to put in (higher is more)
* @return total estimate size (in bytes) on stable storage
*/
- virtual int64_t storageSize(OperationContext* txn,
+ virtual int64_t storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo = NULL,
int infoLevel = 0) const = 0;
@@ -333,9 +334,9 @@ public:
* In general, prefer findRecord or RecordCursor::seekExact since they can tell you if a
* record has been removed.
*/
- virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const {
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& loc) const {
RecordData data;
- invariant(findRecord(txn, loc, &data));
+ invariant(findRecord(opCtx, loc, &data));
return data;
}
@@ -353,8 +354,8 @@ public:
* potentially deleted RecordIds to seek methods if they know that MMAPv1 is not the current
* storage engine. All new storage engines must support detecting the existence of Records.
*/
- virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* out) const {
- auto cursor = getCursor(txn);
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* out) const {
+ auto cursor = getCursor(opCtx);
auto record = cursor->seekExact(loc);
if (!record)
return false;
@@ -364,19 +365,19 @@ public:
return true;
}
- virtual void deleteRecord(OperationContext* txn, const RecordId& dl) = 0;
+ virtual void deleteRecord(OperationContext* opCtx, const RecordId& dl) = 0;
- virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) = 0;
- virtual Status insertRecords(OperationContext* txn,
+ virtual Status insertRecords(OperationContext* opCtx,
std::vector<Record>* records,
bool enforceQuota) {
for (auto& record : *records) {
StatusWith<RecordId> res =
- insertRecord(txn, record.data.data(), record.data.size(), enforceQuota);
+ insertRecord(opCtx, record.data.data(), record.data.size(), enforceQuota);
if (!res.isOK())
return res.getStatus();
@@ -394,7 +395,7 @@ public:
* On success, if idsOut is non-null the RecordIds of the inserted records will be written into
* it. It must have space for nDocs RecordIds.
*/
- virtual Status insertRecordsWithDocWriter(OperationContext* txn,
+ virtual Status insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut = nullptr) = 0;
@@ -402,9 +403,9 @@ public:
/**
* A thin wrapper around insertRecordsWithDocWriter() to simplify handling of single DocWriters.
*/
- StatusWith<RecordId> insertRecordWithDocWriter(OperationContext* txn, const DocWriter* doc) {
+ StatusWith<RecordId> insertRecordWithDocWriter(OperationContext* opCtx, const DocWriter* doc) {
RecordId out;
- Status status = insertRecordsWithDocWriter(txn, &doc, 1, &out);
+ Status status = insertRecordsWithDocWriter(opCtx, &doc, 1, &out);
if (!status.isOK())
return status;
return out;
@@ -422,7 +423,7 @@ public:
*
* For capped record stores, the record size will never change.
*/
- virtual Status updateRecord(OperationContext* txn,
+ virtual Status updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int len,
@@ -446,7 +447,7 @@ public:
* @return the updated version of the record. If unowned data is returned, then it is valid
* until the next modification of this Record or the lock on the collection has been released.
*/
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn,
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
const RecordId& loc,
const RecordData& oldRec,
const char* damageSource,
@@ -460,7 +461,7 @@ public:
* are allowed to lazily seek to the first Record when next() is called rather than doing
* it on construction.
*/
- virtual std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ virtual std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward = true) const = 0;
/**
@@ -468,7 +469,7 @@ public:
* damaged records. The iterator might return every record in the store if all of them
* are reachable and not corrupted. Returns NULL if not supported.
*/
- virtual std::unique_ptr<RecordCursor> getCursorForRepair(OperationContext* txn) const {
+ virtual std::unique_ptr<RecordCursor> getCursorForRepair(OperationContext* opCtx) const {
return {};
}
@@ -483,7 +484,7 @@ public:
* the record store. Implementations should avoid obvious biases toward older, newer, larger
* smaller or other specific classes of documents.
*/
- virtual std::unique_ptr<RecordCursor> getRandomCursor(OperationContext* txn) const {
+ virtual std::unique_ptr<RecordCursor> getRandomCursor(OperationContext* opCtx) const {
return {};
}
@@ -491,9 +492,10 @@ public:
* Returns many RecordCursors that partition the RecordStore into many disjoint sets.
* Iterating all returned RecordCursors is equivalent to iterating the full store.
*/
- virtual std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const {
+ virtual std::vector<std::unique_ptr<RecordCursor>> getManyCursors(
+ OperationContext* opCtx) const {
std::vector<std::unique_ptr<RecordCursor>> out(1);
- out[0] = getCursor(txn);
+ out[0] = getCursor(opCtx);
return out;
}
@@ -503,7 +505,7 @@ public:
/**
* removes all Records
*/
- virtual Status truncate(OperationContext* txn) = 0;
+ virtual Status truncate(OperationContext* opCtx) = 0;
/**
* Truncate documents newer than the document at 'end' from the capped
@@ -511,7 +513,7 @@ public:
* function. An assertion will be thrown if that is attempted.
* @param inclusive - Truncate 'end' as well iff true
*/
- virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) = 0;
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) = 0;
/**
* does this RecordStore support the compact operation?
@@ -537,7 +539,7 @@ public:
* Only called if compactSupported() returns true.
* No RecordStoreCompactAdaptor will be passed if compactsInPlace() returns true.
*/
- virtual Status compact(OperationContext* txn,
+ virtual Status compact(OperationContext* opCtx,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* options,
CompactStats* stats) {
@@ -549,7 +551,7 @@ public:
* OK will be returned even if corruption is found
* deatils will be in result
*/
- virtual Status validate(OperationContext* txn,
+ virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
@@ -559,7 +561,7 @@ public:
* @param scaleSize - amount by which to scale size metrics
* appends any custom stats from the RecordStore or other unique stats
*/
- virtual void appendCustomStats(OperationContext* txn,
+ virtual void appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const = 0;
@@ -572,7 +574,7 @@ public:
*
* @param output (optional) - where to put detailed stats
*/
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const {
+ virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const {
return Status(ErrorCodes::CommandNotSupported,
"this storage engine does not support touch");
}
@@ -584,7 +586,7 @@ public:
* If you don't implement the oplogStartHack, just use the default implementation which
* returns boost::none.
*/
- virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
+ virtual boost::optional<RecordId> oplogStartHack(OperationContext* opCtx,
const RecordId& startingPosition) const {
return boost::none;
}
@@ -597,7 +599,7 @@ public:
* Since this is called inside of a WriteUnitOfWork while holding a std::mutex, it is
* illegal to acquire any LockManager locks inside of this function.
*/
- virtual Status oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime) {
+ virtual Status oplogDiskLocRegister(OperationContext* opCtx, const Timestamp& opTime) {
return Status::OK();
}
@@ -608,12 +610,12 @@ public:
* It is only legal to call this on an oplog. It is illegal to call this inside a
* WriteUnitOfWork.
*/
- virtual void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const = 0;
+ virtual void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const = 0;
/**
* Called after a repair operation is run with the recomputed numRecords and dataSize.
*/
- virtual void updateStatsAfterRepair(OperationContext* txn,
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize) = 0;
diff --git a/src/mongo/db/storage/record_store_test_capped_visibility.cpp b/src/mongo/db/storage/record_store_test_capped_visibility.cpp
index 3669edb391f..8ced75f97be 100644
--- a/src/mongo/db/storage/record_store_test_capped_visibility.cpp
+++ b/src/mongo/db/storage/record_store_test_capped_visibility.cpp
@@ -40,9 +40,9 @@
namespace mongo {
namespace {
-RecordId doInsert(unowned_ptr<OperationContext> txn, unowned_ptr<RecordStore> rs) {
+RecordId doInsert(unowned_ptr<OperationContext> opCtx, unowned_ptr<RecordStore> rs) {
static char zeros[16];
- return uassertStatusOK(rs->insertRecord(txn, zeros, sizeof(zeros), false));
+ return uassertStatusOK(rs->insertRecord(opCtx, zeros, sizeof(zeros), false));
}
// macro to keep assert line numbers correct.
@@ -76,27 +76,27 @@ TEST(RecordStore_CappedVisibility, EmptyInitialState) {
RecordId otherId;
{
- auto txn = harness->newOperationContext();
- WriteUnitOfWork wuow(txn.get());
+ auto opCtx = harness->newOperationContext();
+ WriteUnitOfWork wuow(opCtx.get());
// Can't see uncommitted write from other operation.
- ASSERT(!rs->getCursor(txn.get())->seekExact(lowestHiddenId));
+ ASSERT(!rs->getCursor(opCtx.get())->seekExact(lowestHiddenId));
- ASSERT(!rs->getCursor(txn.get(), true)->next());
- ASSERT(!rs->getCursor(txn.get(), false)->next());
+ ASSERT(!rs->getCursor(opCtx.get(), true)->next());
+ ASSERT(!rs->getCursor(opCtx.get(), false)->next());
- otherId = doInsert(txn, rs);
+ otherId = doInsert(opCtx, rs);
- ASSERT(!rs->getCursor(txn.get(), true)->next());
- ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), otherId);
- ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(otherId), otherId);
+ ASSERT(!rs->getCursor(opCtx.get(), true)->next());
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), false)->next(), otherId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get())->seekExact(otherId), otherId);
wuow.commit();
- ASSERT(!rs->getCursor(txn.get(), true)->next());
- ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), otherId);
- ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(otherId), otherId);
- ASSERT(!rs->getCursor(txn.get())->seekExact(lowestHiddenId));
+ ASSERT(!rs->getCursor(opCtx.get(), true)->next());
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), false)->next(), otherId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get())->seekExact(otherId), otherId);
+ ASSERT(!rs->getCursor(opCtx.get())->seekExact(lowestHiddenId));
}
// longLivedOp is still on old snapshot so it can't see otherId yet.
@@ -147,28 +147,28 @@ TEST(RecordStore_CappedVisibility, NonEmptyInitialState) {
RecordId otherId;
{
- auto txn = harness->newOperationContext();
- WriteUnitOfWork wuow(txn.get());
+ auto opCtx = harness->newOperationContext();
+ WriteUnitOfWork wuow(opCtx.get());
// Can only see committed writes from other operation.
- ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(initialId), initialId);
- ASSERT(!rs->getCursor(txn.get())->seekExact(lowestHiddenId));
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get())->seekExact(initialId), initialId);
+ ASSERT(!rs->getCursor(opCtx.get())->seekExact(lowestHiddenId));
- ASSERT_ID_EQ(rs->getCursor(txn.get(), true)->next(), initialId);
- ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), false)->next(), initialId);
- otherId = doInsert(txn, rs);
+ otherId = doInsert(opCtx, rs);
- ASSERT_ID_EQ(rs->getCursor(txn.get(), true)->next(), initialId);
- ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), otherId);
- ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(otherId), otherId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), false)->next(), otherId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get())->seekExact(otherId), otherId);
wuow.commit();
- ASSERT_ID_EQ(rs->getCursor(txn.get(), true)->next(), initialId);
- ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), otherId);
- ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(otherId), otherId);
- ASSERT(!rs->getCursor(txn.get())->seekExact(lowestHiddenId));
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get(), false)->next(), otherId);
+ ASSERT_ID_EQ(rs->getCursor(opCtx.get())->seekExact(otherId), otherId);
+ ASSERT(!rs->getCursor(opCtx.get())->seekExact(lowestHiddenId));
}
// longLivedOp is still on old snapshot so it can't see otherId yet.
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.h b/src/mongo/db/storage/record_store_test_updaterecord.h
index be52887cf2b..e20b32bcce4 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.h
+++ b/src/mongo/db/storage/record_store_test_updaterecord.h
@@ -40,14 +40,14 @@ namespace {
class UpdateNotifierSpy : public UpdateNotifier {
public:
- UpdateNotifierSpy(OperationContext* txn, const RecordId& loc, const char* buf, size_t size)
- : _txn(txn), _loc(loc), _data(buf, size), nInPlaceCalls(0) {}
+ UpdateNotifierSpy(OperationContext* opCtx, const RecordId& loc, const char* buf, size_t size)
+ : _opCtx(opCtx), _loc(loc), _data(buf, size), nInPlaceCalls(0) {}
~UpdateNotifierSpy() {}
- Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) {
+ Status recordStoreGoingToUpdateInPlace(OperationContext* opCtx, const RecordId& loc) {
nInPlaceCalls++;
- ASSERT_EQUALS(_txn, txn);
+ ASSERT_EQUALS(_opCtx, opCtx);
ASSERT_EQUALS(_loc, loc);
return Status::OK();
}
@@ -57,7 +57,7 @@ public:
}
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
RecordId _loc;
std::string _data;
diff --git a/src/mongo/db/storage/snapshot_manager.h b/src/mongo/db/storage/snapshot_manager.h
index 8d4b81fd5c3..ef588d1c1a2 100644
--- a/src/mongo/db/storage/snapshot_manager.h
+++ b/src/mongo/db/storage/snapshot_manager.h
@@ -58,7 +58,7 @@ public:
* This must be the first method called after starting a ScopedTransaction, and it is
* illegal to start a WriteUnitOfWork inside of the same ScopedTransaction.
*/
- virtual Status prepareForCreateSnapshot(OperationContext* txn) = 0;
+ virtual Status prepareForCreateSnapshot(OperationContext* opCtx) = 0;
/**
* Creates a new named snapshot representing the same point-in-time captured in
@@ -68,7 +68,7 @@ public:
*
* Caller guarantees that this name must compare greater than all existing snapshots.
*/
- virtual Status createSnapshot(OperationContext* txn, const SnapshotName& name) = 0;
+ virtual Status createSnapshot(OperationContext* opCtx, const SnapshotName& name) = 0;
/**
* Sets the snapshot to be used for committed reads.
diff --git a/src/mongo/db/storage/sorted_data_interface.h b/src/mongo/db/storage/sorted_data_interface.h
index d21fb6e3c8f..7dfcc8554f3 100644
--- a/src/mongo/db/storage/sorted_data_interface.h
+++ b/src/mongo/db/storage/sorted_data_interface.h
@@ -75,18 +75,19 @@ public:
* Implementations can assume that 'this' index outlives its bulk
* builder.
*
- * @param txn the transaction under which keys are added to 'this' index
+ * @param opCtx the transaction under which keys are added to 'this' index
* @param dupsAllowed true if duplicate keys are allowed, and false
* otherwise
*
* @return caller takes ownership
*/
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) = 0;
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx,
+ bool dupsAllowed) = 0;
/**
* Insert an entry into the index with the specified key and RecordId.
*
- * @param txn the transaction under which the insert takes place
+ * @param opCtx the transaction under which the insert takes place
* @param dupsAllowed true if duplicate keys are allowed, and false
* otherwise
*
@@ -95,7 +96,7 @@ public:
* ErrorCodes::DuplicateKey if 'key' already exists in 'this' index
* at a RecordId other than 'loc' and duplicates were not allowed
*/
- virtual Status insert(OperationContext* txn,
+ virtual Status insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) = 0;
@@ -103,11 +104,11 @@ public:
/**
* Remove the entry from the index with the specified key and RecordId.
*
- * @param txn the transaction under which the remove takes place
+ * @param opCtx the transaction under which the remove takes place
* @param dupsAllowed true if duplicate keys are allowed, and false
* otherwise
*/
- virtual void unindex(OperationContext* txn,
+ virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& loc,
bool dupsAllowed) = 0;
@@ -116,17 +117,19 @@ public:
* Return ErrorCodes::DuplicateKey if 'key' already exists in 'this'
* index at a RecordId other than 'loc', and Status::OK() otherwise.
*
- * @param txn the transaction under which this operation takes place
+ * @param opCtx the transaction under which this operation takes place
*
* TODO: Hide this by exposing an update method?
*/
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) = 0;
+ virtual Status dupKeyCheck(OperationContext* opCtx,
+ const BSONObj& key,
+ const RecordId& loc) = 0;
/**
* Attempt to reduce the storage space used by this index via compaction. Only called if the
* indexed record store supports compaction-in-place.
*/
- virtual Status compact(OperationContext* txn) {
+ virtual Status compact(OperationContext* opCtx) {
return Status::OK();
}
@@ -137,11 +140,11 @@ public:
/**
* TODO: expose full set of args for testing?
*/
- virtual void fullValidate(OperationContext* txn,
+ virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const = 0;
- virtual bool appendCustomStats(OperationContext* txn,
+ virtual bool appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const = 0;
@@ -149,16 +152,16 @@ public:
/**
* Return the number of bytes consumed by 'this' index.
*
- * @param txn the transaction under which this operation takes place
+ * @param opCtx the transaction under which this operation takes place
*
* @see IndexAccessMethod::getSpaceUsedBytes
*/
- virtual long long getSpaceUsedBytes(OperationContext* txn) const = 0;
+ virtual long long getSpaceUsedBytes(OperationContext* opCtx) const = 0;
/**
* Return true if 'this' index is empty, and false otherwise.
*/
- virtual bool isEmpty(OperationContext* txn) = 0;
+ virtual bool isEmpty(OperationContext* opCtx) = 0;
/**
* Attempt to bring the entirety of 'this' index into memory.
@@ -168,7 +171,7 @@ public:
*
* @return Status::OK()
*/
- virtual Status touch(OperationContext* txn) const {
+ virtual Status touch(OperationContext* opCtx) const {
return Status(ErrorCodes::CommandNotSupported,
"this storage engine does not support touch");
}
@@ -179,9 +182,9 @@ public:
* The default implementation should be overridden with a more
* efficient one if at all possible.
*/
- virtual long long numEntries(OperationContext* txn) const {
+ virtual long long numEntries(OperationContext* opCtx) const {
long long x = -1;
- fullValidate(txn, &x, NULL);
+ fullValidate(opCtx, &x, NULL);
return x;
}
@@ -357,7 +360,7 @@ public:
*
* Implementations can assume that 'this' index outlives all cursors it produces.
*/
- virtual std::unique_ptr<Cursor> newCursor(OperationContext* txn,
+ virtual std::unique_ptr<Cursor> newCursor(OperationContext* opCtx,
bool isForward = true) const = 0;
/**
@@ -374,7 +377,7 @@ public:
* Implementations should avoid obvious biases toward older, newer, larger smaller or other
* specific classes of entries.
*/
- virtual std::unique_ptr<Cursor> newRandomCursor(OperationContext* txn) const {
+ virtual std::unique_ptr<Cursor> newRandomCursor(OperationContext* opCtx) const {
return {};
}
@@ -382,7 +385,7 @@ public:
// Index creation
//
- virtual Status initAsEmpty(OperationContext* txn) = 0;
+ virtual Status initAsEmpty(OperationContext* opCtx) = 0;
};
/**
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
index 57461bcf0d4..5c034ceedbc 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
@@ -47,22 +47,22 @@ auto mongo::SortedDataInterfaceHarnessHelper::newSortedDataInterface(
return index;
}
-void mongo::insertToIndex(unowned_ptr<OperationContext> txn,
+void mongo::insertToIndex(unowned_ptr<OperationContext> opCtx,
unowned_ptr<SortedDataInterface> index,
std::initializer_list<IndexKeyEntry> toInsert) {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
for (auto&& entry : toInsert) {
- ASSERT_OK(index->insert(txn, entry.key, entry.loc, true));
+ ASSERT_OK(index->insert(opCtx, entry.key, entry.loc, true));
}
wuow.commit();
}
-void mongo::removeFromIndex(unowned_ptr<OperationContext> txn,
+void mongo::removeFromIndex(unowned_ptr<OperationContext> opCtx,
unowned_ptr<SortedDataInterface> index,
std::initializer_list<IndexKeyEntry> toRemove) {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
for (auto&& entry : toRemove) {
- index->unindex(txn, entry.key, entry.loc, true);
+ index->unindex(opCtx, entry.key, entry.loc, true);
}
wuow.commit();
}
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.h b/src/mongo/db/storage/sorted_data_interface_test_harness.h
index def0ed88813..e6f9443fd23 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.h
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.h
@@ -105,7 +105,7 @@ public:
*
* Should be used for declaring and changing conditions, not for testing inserts.
*/
-void insertToIndex(unowned_ptr<OperationContext> txn,
+void insertToIndex(unowned_ptr<OperationContext> opCtx,
unowned_ptr<SortedDataInterface> index,
std::initializer_list<IndexKeyEntry> toInsert);
@@ -122,7 +122,7 @@ inline void insertToIndex(unowned_ptr<HarnessHelper> harness,
*
* Should be used for declaring and changing conditions, not for testing removes.
*/
-void removeFromIndex(unowned_ptr<OperationContext> txn,
+void removeFromIndex(unowned_ptr<OperationContext> opCtx,
unowned_ptr<SortedDataInterface> index,
std::initializer_list<IndexKeyEntry> toRemove);
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index 02e9c1ff7aa..f3f00631c8d 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -197,17 +197,17 @@ public:
/**
* Closes all file handles associated with a database.
*/
- virtual Status closeDatabase(OperationContext* txn, StringData db) = 0;
+ virtual Status closeDatabase(OperationContext* opCtx, StringData db) = 0;
/**
* Deletes all data and metadata for a database.
*/
- virtual Status dropDatabase(OperationContext* txn, StringData db) = 0;
+ virtual Status dropDatabase(OperationContext* opCtx, StringData db) = 0;
/**
* @return number of files flushed
*/
- virtual int flushAllFiles(OperationContext* txn, bool sync) = 0;
+ virtual int flushAllFiles(OperationContext* opCtx, bool sync) = 0;
/**
* Transitions the storage engine into backup mode.
@@ -228,7 +228,7 @@ public:
* retried, returns a non-OK status. This function may throw a WriteConflictException, which
* should trigger a retry by the caller. All other exceptions should be treated as errors.
*/
- virtual Status beginBackup(OperationContext* txn) {
+ virtual Status beginBackup(OperationContext* opCtx) {
return Status(ErrorCodes::CommandNotSupported,
"The current storage engine doesn't support backup mode");
}
@@ -240,7 +240,7 @@ public:
*
* Storage engines implementing this feature should fassert when unable to leave backup mode.
*/
- virtual void endBackup(OperationContext* txn) {
+ virtual void endBackup(OperationContext* opCtx) {
return;
}
@@ -253,7 +253,7 @@ public:
*
* NOTE: MMAPv1 does not support this method and has its own repairDatabase() method.
*/
- virtual Status repairRecordStore(OperationContext* txn, const std::string& ns) = 0;
+ virtual Status repairRecordStore(OperationContext* opCtx, const std::string& ns) = 0;
/**
* This method will be called before there is a clean shutdown. Storage engines should
diff --git a/src/mongo/db/storage/storage_init.cpp b/src/mongo/db/storage/storage_init.cpp
index 0af1b78a602..dc21864d5b5 100644
--- a/src/mongo/db/storage/storage_init.cpp
+++ b/src/mongo/db/storage/storage_init.cpp
@@ -52,8 +52,9 @@ public:
return true;
}
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
- auto engine = txn->getClient()->getServiceContext()->getGlobalStorageEngine();
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const {
+ auto engine = opCtx->getClient()->getServiceContext()->getGlobalStorageEngine();
return BSON("name" << storageGlobalParams.engine << "supportsCommittedReads"
<< bool(engine->getSnapshotManager())
<< "readOnly"
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index 5daafcbd8ea..5e70584a86d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -209,11 +209,11 @@ StatusWith<std::string> WiredTigerIndex::generateCreateString(const std::string&
return StatusWith<std::string>(ss);
}
-int WiredTigerIndex::Create(OperationContext* txn,
+int WiredTigerIndex::Create(OperationContext* opCtx,
const std::string& uri,
const std::string& config) {
// Don't use the session from the recovery unit: create should not be used in a transaction
- WiredTigerSession session(WiredTigerRecoveryUnit::get(txn)->getSessionCache()->conn());
+ WiredTigerSession session(WiredTigerRecoveryUnit::get(opCtx)->getSessionCache()->conn());
WT_SESSION* s = session.getSession();
LOG(1) << "create uri: " << uri << " config: " << config;
return s->create(s, uri.c_str(), config.c_str());
@@ -244,7 +244,7 @@ WiredTigerIndex::WiredTigerIndex(OperationContext* ctx,
version.getValue() == kKeyStringV1Version ? KeyString::Version::V1 : KeyString::Version::V0;
}
-Status WiredTigerIndex::insert(OperationContext* txn,
+Status WiredTigerIndex::insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& id,
bool dupsAllowed) {
@@ -255,21 +255,21 @@ Status WiredTigerIndex::insert(OperationContext* txn,
if (!s.isOK())
return s;
- WiredTigerCursor curwrap(_uri, _tableId, false, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, false, opCtx);
curwrap.assertInActiveTxn();
WT_CURSOR* c = curwrap.get();
return _insert(c, key, id, dupsAllowed);
}
-void WiredTigerIndex::unindex(OperationContext* txn,
+void WiredTigerIndex::unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& id,
bool dupsAllowed) {
invariant(id.isNormal());
dassert(!hasFieldNames(key));
- WiredTigerCursor curwrap(_uri, _tableId, false, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, false, opCtx);
curwrap.assertInActiveTxn();
WT_CURSOR* c = curwrap.get();
invariant(c);
@@ -277,11 +277,11 @@ void WiredTigerIndex::unindex(OperationContext* txn,
_unindex(c, key, id, dupsAllowed);
}
-void WiredTigerIndex::fullValidate(OperationContext* txn,
+void WiredTigerIndex::fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const {
- if (fullResults && !WiredTigerRecoveryUnit::get(txn)->getSessionCache()->isEphemeral()) {
- int err = WiredTigerUtil::verifyTable(txn, _uri, &(fullResults->errors));
+ if (fullResults && !WiredTigerRecoveryUnit::get(opCtx)->getSessionCache()->isEphemeral()) {
+ int err = WiredTigerUtil::verifyTable(opCtx, _uri, &(fullResults->errors));
if (err == EBUSY) {
const char* msg = "verify() returned EBUSY. Not treating as invalid.";
warning() << msg;
@@ -298,7 +298,7 @@ void WiredTigerIndex::fullValidate(OperationContext* txn,
}
}
- auto cursor = newCursor(txn);
+ auto cursor = newCursor(opCtx);
long long count = 0;
TRACE_INDEX << " fullValidate";
@@ -312,12 +312,12 @@ void WiredTigerIndex::fullValidate(OperationContext* txn,
}
}
-bool WiredTigerIndex::appendCustomStats(OperationContext* txn,
+bool WiredTigerIndex::appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const {
{
BSONObjBuilder metadata(output->subobjStart("metadata"));
- Status status = WiredTigerUtil::getApplicationMetadata(txn, uri(), &metadata);
+ Status status = WiredTigerUtil::getApplicationMetadata(opCtx, uri(), &metadata);
if (!status.isOK()) {
metadata.append("error", "unable to retrieve metadata");
metadata.append("code", static_cast<int>(status.code()));
@@ -325,8 +325,8 @@ bool WiredTigerIndex::appendCustomStats(OperationContext* txn,
}
}
std::string type, sourceURI;
- WiredTigerUtil::fetchTypeAndSourceURI(txn, _uri, &type, &sourceURI);
- StatusWith<std::string> metadataResult = WiredTigerUtil::getMetadata(txn, sourceURI);
+ WiredTigerUtil::fetchTypeAndSourceURI(opCtx, _uri, &type, &sourceURI);
+ StatusWith<std::string> metadataResult = WiredTigerUtil::getMetadata(opCtx, sourceURI);
StringData creationStringName("creationString");
if (!metadataResult.isOK()) {
BSONObjBuilder creationString(output->subobjStart(creationStringName));
@@ -339,7 +339,7 @@ bool WiredTigerIndex::appendCustomStats(OperationContext* txn,
output->append("type", type);
}
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx);
WT_SESSION* s = session->getSession();
Status status =
WiredTigerUtil::exportTableToBSON(s, "statistics:" + uri(), "statistics=(fast)", output);
@@ -351,11 +351,13 @@ bool WiredTigerIndex::appendCustomStats(OperationContext* txn,
return true;
}
-Status WiredTigerIndex::dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& id) {
+Status WiredTigerIndex::dupKeyCheck(OperationContext* opCtx,
+ const BSONObj& key,
+ const RecordId& id) {
invariant(!hasFieldNames(key));
invariant(unique());
- WiredTigerCursor curwrap(_uri, _tableId, false, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, false, opCtx);
WT_CURSOR* c = curwrap.get();
if (isDup(c, key, id))
@@ -363,8 +365,8 @@ Status WiredTigerIndex::dupKeyCheck(OperationContext* txn, const BSONObj& key, c
return Status::OK();
}
-bool WiredTigerIndex::isEmpty(OperationContext* txn) {
- WiredTigerCursor curwrap(_uri, _tableId, false, txn);
+bool WiredTigerIndex::isEmpty(OperationContext* opCtx) {
+ WiredTigerCursor curwrap(_uri, _tableId, false, opCtx);
WT_CURSOR* c = curwrap.get();
if (!c)
return true;
@@ -375,8 +377,8 @@ bool WiredTigerIndex::isEmpty(OperationContext* txn) {
return false;
}
-Status WiredTigerIndex::touch(OperationContext* txn) const {
- if (WiredTigerRecoveryUnit::get(txn)->getSessionCache()->isEphemeral()) {
+Status WiredTigerIndex::touch(OperationContext* opCtx) const {
+ if (WiredTigerRecoveryUnit::get(opCtx)->getSessionCache()->isEphemeral()) {
// Everything is already in memory.
return Status::OK();
}
@@ -384,9 +386,9 @@ Status WiredTigerIndex::touch(OperationContext* txn) const {
}
-long long WiredTigerIndex::getSpaceUsedBytes(OperationContext* txn) const {
- auto ru = WiredTigerRecoveryUnit::get(txn);
- WiredTigerSession* session = ru->getSession(txn);
+long long WiredTigerIndex::getSpaceUsedBytes(OperationContext* opCtx) const {
+ auto ru = WiredTigerRecoveryUnit::get(opCtx);
+ WiredTigerSession* session = ru->getSession(opCtx);
if (ru->getSessionCache()->isEphemeral()) {
// For ephemeral case, use cursor statistics
@@ -449,13 +451,13 @@ bool WiredTigerIndex::isDup(WT_CURSOR* c, const BSONObj& key, const RecordId& id
return true;
}
-Status WiredTigerIndex::initAsEmpty(OperationContext* txn) {
+Status WiredTigerIndex::initAsEmpty(OperationContext* opCtx) {
// No-op
return Status::OK();
}
-Status WiredTigerIndex::compact(OperationContext* txn) {
- WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(txn)->getSessionCache();
+Status WiredTigerIndex::compact(OperationContext* opCtx) {
+ WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache();
if (!cache->isEphemeral()) {
UniqueWiredTigerSession session = cache->getSession();
WT_SESSION* s = session->getSession();
@@ -472,10 +474,10 @@ Status WiredTigerIndex::compact(OperationContext* txn) {
*/
class WiredTigerIndex::BulkBuilder : public SortedDataBuilderInterface {
public:
- BulkBuilder(WiredTigerIndex* idx, OperationContext* txn)
+ BulkBuilder(WiredTigerIndex* idx, OperationContext* opCtx)
: _ordering(idx->_ordering),
- _txn(txn),
- _session(WiredTigerRecoveryUnit::get(_txn)->getSessionCache()->getSession()),
+ _opCtx(opCtx),
+ _session(WiredTigerRecoveryUnit::get(_opCtx)->getSessionCache()->getSession()),
_cursor(openBulkCursor(idx)) {}
~BulkBuilder() {
@@ -486,7 +488,7 @@ protected:
WT_CURSOR* openBulkCursor(WiredTigerIndex* idx) {
// Open cursors can cause bulk open_cursor to fail with EBUSY.
// TODO any other cases that could cause EBUSY?
- WiredTigerSession* outerSession = WiredTigerRecoveryUnit::get(_txn)->getSession(_txn);
+ WiredTigerSession* outerSession = WiredTigerRecoveryUnit::get(_opCtx)->getSession(_opCtx);
outerSession->closeAllCursors();
// Not using cursor cache since we need to set "bulk".
@@ -509,7 +511,7 @@ protected:
}
const Ordering _ordering;
- OperationContext* const _txn;
+ OperationContext* const _opCtx;
UniqueWiredTigerSession const _session;
WT_CURSOR* const _cursor;
};
@@ -519,8 +521,8 @@ protected:
*/
class WiredTigerIndex::StandardBulkBuilder : public BulkBuilder {
public:
- StandardBulkBuilder(WiredTigerIndex* idx, OperationContext* txn)
- : BulkBuilder(idx, txn), _idx(idx) {}
+ StandardBulkBuilder(WiredTigerIndex* idx, OperationContext* opCtx)
+ : BulkBuilder(idx, opCtx), _idx(idx) {}
Status addKey(const BSONObj& key, const RecordId& id) {
{
@@ -549,7 +551,7 @@ public:
void commit(bool mayInterrupt) {
// TODO do we still need this?
// this is bizarre, but required as part of the contract
- WriteUnitOfWork uow(_txn);
+ WriteUnitOfWork uow(_opCtx);
uow.commit();
}
@@ -567,8 +569,8 @@ private:
*/
class WiredTigerIndex::UniqueBulkBuilder : public BulkBuilder {
public:
- UniqueBulkBuilder(WiredTigerIndex* idx, OperationContext* txn, bool dupsAllowed)
- : BulkBuilder(idx, txn),
+ UniqueBulkBuilder(WiredTigerIndex* idx, OperationContext* opCtx, bool dupsAllowed)
+ : BulkBuilder(idx, opCtx),
_idx(idx),
_dupsAllowed(dupsAllowed),
_keyString(idx->keyStringVersion()) {}
@@ -607,7 +609,7 @@ public:
}
void commit(bool mayInterrupt) {
- WriteUnitOfWork uow(_txn);
+ WriteUnitOfWork uow(_opCtx);
if (!_records.empty()) {
// This handles inserting the last unique key.
doInsert();
@@ -654,14 +656,14 @@ namespace {
*/
class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor {
public:
- WiredTigerIndexCursorBase(const WiredTigerIndex& idx, OperationContext* txn, bool forward)
- : _txn(txn),
+ WiredTigerIndexCursorBase(const WiredTigerIndex& idx, OperationContext* opCtx, bool forward)
+ : _opCtx(opCtx),
_idx(idx),
_forward(forward),
_key(idx.keyStringVersion()),
_typeBits(idx.keyStringVersion()),
_query(idx.keyStringVersion()) {
- _cursor.emplace(_idx.uri(), _idx.tableId(), false, _txn);
+ _cursor.emplace(_idx.uri(), _idx.tableId(), false, _opCtx);
}
boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
// Advance on a cursor at the end is a no-op
@@ -739,11 +741,11 @@ public:
void restore() override {
if (!_cursor) {
- _cursor.emplace(_idx.uri(), _idx.tableId(), false, _txn);
+ _cursor.emplace(_idx.uri(), _idx.tableId(), false, _opCtx);
}
// Ensure an active session exists, so any restored cursors will bind to it
- invariant(WiredTigerRecoveryUnit::get(_txn)->getSession(_txn) == _cursor->getSession());
+ invariant(WiredTigerRecoveryUnit::get(_opCtx)->getSession(_opCtx) == _cursor->getSession());
if (!_eof) {
// Unique indices *don't* include the record id in their KeyStrings. If we seek to the
@@ -760,12 +762,12 @@ public:
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
_cursor = boost::none;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
// _cursor recreated in restore() to avoid risk of WT_ROLLBACK issues.
}
@@ -910,7 +912,7 @@ protected:
updateIdAndTypeBits();
}
- OperationContext* _txn;
+ OperationContext* _opCtx;
boost::optional<WiredTigerCursor> _cursor;
const WiredTigerIndex& _idx; // not owned
const bool _forward;
@@ -937,8 +939,8 @@ protected:
class WiredTigerIndexStandardCursor final : public WiredTigerIndexCursorBase {
public:
- WiredTigerIndexStandardCursor(const WiredTigerIndex& idx, OperationContext* txn, bool forward)
- : WiredTigerIndexCursorBase(idx, txn, forward) {}
+ WiredTigerIndexStandardCursor(const WiredTigerIndex& idx, OperationContext* opCtx, bool forward)
+ : WiredTigerIndexCursorBase(idx, opCtx, forward) {}
void updateIdAndTypeBits() override {
_id = KeyString::decodeRecordIdAtEnd(_key.getBuffer(), _key.getSize());
@@ -953,8 +955,8 @@ public:
class WiredTigerIndexUniqueCursor final : public WiredTigerIndexCursorBase {
public:
- WiredTigerIndexUniqueCursor(const WiredTigerIndex& idx, OperationContext* txn, bool forward)
- : WiredTigerIndexCursorBase(idx, txn, forward) {}
+ WiredTigerIndexUniqueCursor(const WiredTigerIndex& idx, OperationContext* opCtx, bool forward)
+ : WiredTigerIndexCursorBase(idx, opCtx, forward) {}
void updateIdAndTypeBits() override {
// We assume that cursors can only ever see unique indexes in their "pristine" state,
@@ -1000,14 +1002,14 @@ WiredTigerIndexUnique::WiredTigerIndexUnique(OperationContext* ctx,
const IndexDescriptor* desc)
: WiredTigerIndex(ctx, uri, desc) {}
-std::unique_ptr<SortedDataInterface::Cursor> WiredTigerIndexUnique::newCursor(OperationContext* txn,
- bool forward) const {
- return stdx::make_unique<WiredTigerIndexUniqueCursor>(*this, txn, forward);
+std::unique_ptr<SortedDataInterface::Cursor> WiredTigerIndexUnique::newCursor(
+ OperationContext* opCtx, bool forward) const {
+ return stdx::make_unique<WiredTigerIndexUniqueCursor>(*this, opCtx, forward);
}
-SortedDataBuilderInterface* WiredTigerIndexUnique::getBulkBuilder(OperationContext* txn,
+SortedDataBuilderInterface* WiredTigerIndexUnique::getBulkBuilder(OperationContext* opCtx,
bool dupsAllowed) {
- return new UniqueBulkBuilder(this, txn, dupsAllowed);
+ return new UniqueBulkBuilder(this, opCtx, dupsAllowed);
}
Status WiredTigerIndexUnique::_insert(WT_CURSOR* c,
@@ -1164,15 +1166,15 @@ WiredTigerIndexStandard::WiredTigerIndexStandard(OperationContext* ctx,
: WiredTigerIndex(ctx, uri, desc) {}
std::unique_ptr<SortedDataInterface::Cursor> WiredTigerIndexStandard::newCursor(
- OperationContext* txn, bool forward) const {
- return stdx::make_unique<WiredTigerIndexStandardCursor>(*this, txn, forward);
+ OperationContext* opCtx, bool forward) const {
+ return stdx::make_unique<WiredTigerIndexStandardCursor>(*this, opCtx, forward);
}
-SortedDataBuilderInterface* WiredTigerIndexStandard::getBulkBuilder(OperationContext* txn,
+SortedDataBuilderInterface* WiredTigerIndexStandard::getBulkBuilder(OperationContext* opCtx,
bool dupsAllowed) {
// We aren't unique so dups better be allowed.
invariant(dupsAllowed);
- return new StandardBulkBuilder(this, txn);
+ return new StandardBulkBuilder(this, opCtx);
}
Status WiredTigerIndexStandard::_insert(WT_CURSOR* c,
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
index 6a2d49c7002..20485fa8f9d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
@@ -72,37 +72,37 @@ public:
* Creates a WiredTiger table suitable for implementing a MongoDB index.
* 'config' should be created with generateCreateString().
*/
- static int Create(OperationContext* txn, const std::string& uri, const std::string& config);
+ static int Create(OperationContext* opCtx, const std::string& uri, const std::string& config);
WiredTigerIndex(OperationContext* ctx, const std::string& uri, const IndexDescriptor* desc);
- virtual Status insert(OperationContext* txn,
+ virtual Status insert(OperationContext* opCtx,
const BSONObj& key,
const RecordId& id,
bool dupsAllowed);
- virtual void unindex(OperationContext* txn,
+ virtual void unindex(OperationContext* opCtx,
const BSONObj& key,
const RecordId& id,
bool dupsAllowed);
- virtual void fullValidate(OperationContext* txn,
+ virtual void fullValidate(OperationContext* opCtx,
long long* numKeysOut,
ValidateResults* fullResults) const;
- virtual bool appendCustomStats(OperationContext* txn,
+ virtual bool appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* output,
double scale) const;
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& id);
+ virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key, const RecordId& id);
- virtual bool isEmpty(OperationContext* txn);
+ virtual bool isEmpty(OperationContext* opCtx);
- virtual Status touch(OperationContext* txn) const;
+ virtual Status touch(OperationContext* opCtx) const;
- virtual long long getSpaceUsedBytes(OperationContext* txn) const;
+ virtual long long getSpaceUsedBytes(OperationContext* opCtx) const;
- virtual Status initAsEmpty(OperationContext* txn);
+ virtual Status initAsEmpty(OperationContext* opCtx);
- virtual Status compact(OperationContext* txn);
+ virtual Status compact(OperationContext* opCtx);
const std::string& uri() const {
return _uri;
@@ -162,10 +162,10 @@ public:
const std::string& uri,
const IndexDescriptor* desc);
- std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool forward) const override;
- SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) override;
+ SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) override;
bool unique() const override {
return true;
@@ -182,10 +182,10 @@ public:
const std::string& uri,
const IndexDescriptor* desc);
- std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* opCtx,
bool forward) const override;
- SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) override;
+ SortedDataBuilderInterface* getBulkBuilder(OperationContext* opCtx, bool dupsAllowed) override;
bool unique() const override {
return false;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
index 47c1c81d352..eceeb4e28d7 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
@@ -70,7 +70,7 @@ public:
std::unique_ptr<SortedDataInterface> newSortedDataInterface(bool unique) final {
std::string ns = "test.wt";
- OperationContextNoop txn(newRecoveryUnit().release());
+ OperationContextNoop opCtx(newRecoveryUnit().release());
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
@@ -84,11 +84,11 @@ public:
ASSERT_OK(result.getStatus());
string uri = "table:" + ns;
- invariantWTOK(WiredTigerIndex::Create(&txn, uri, result.getValue()));
+ invariantWTOK(WiredTigerIndex::Create(&opCtx, uri, result.getValue()));
if (unique)
- return stdx::make_unique<WiredTigerIndexUnique>(&txn, uri, &desc);
- return stdx::make_unique<WiredTigerIndexStandard>(&txn, uri, &desc);
+ return stdx::make_unique<WiredTigerIndexUnique>(&opCtx, uri, &desc);
+ return stdx::make_unique<WiredTigerIndexStandard>(&opCtx, uri, &desc);
}
std::unique_ptr<RecoveryUnit> newRecoveryUnit() final {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 682fd842aaf..060f4b58288 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -137,7 +137,7 @@ public:
TicketServerParameter(TicketHolder* holder, const std::string& name)
: ServerParameter(ServerParameterSet::getGlobal(), name, true, true), _holder(holder) {}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name) {
b.append(name, _holder->outof());
}
@@ -401,7 +401,7 @@ Status WiredTigerKVEngine::_salvageIfNeeded(const char* uri) {
return wtRCToStatus(session->salvage(session, uri, NULL), "Salvage failed:");
}
-int WiredTigerKVEngine::flushAllFiles(OperationContext* txn, bool sync) {
+int WiredTigerKVEngine::flushAllFiles(OperationContext* opCtx, bool sync) {
LOG(1) << "WiredTigerKVEngine::flushAllFiles";
if (_ephemeral) {
return 0;
@@ -412,7 +412,7 @@ int WiredTigerKVEngine::flushAllFiles(OperationContext* txn, bool sync) {
return 1;
}
-Status WiredTigerKVEngine::beginBackup(OperationContext* txn) {
+Status WiredTigerKVEngine::beginBackup(OperationContext* opCtx) {
invariant(!_backupSession);
// This cursor will be freed by the backupSession being closed as the session is uncached
@@ -427,7 +427,7 @@ Status WiredTigerKVEngine::beginBackup(OperationContext* txn) {
return Status::OK();
}
-void WiredTigerKVEngine::endBackup(OperationContext* txn) {
+void WiredTigerKVEngine::endBackup(OperationContext* opCtx) {
_backupSession.reset();
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index 8f632ef537c..1386eb7808d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -108,11 +108,11 @@ public:
StringData ident,
const RecordStore* originalRecordStore) const;
- virtual int flushAllFiles(OperationContext* txn, bool sync);
+ virtual int flushAllFiles(OperationContext* opCtx, bool sync);
- virtual Status beginBackup(OperationContext* txn);
+ virtual Status beginBackup(OperationContext* opCtx);
- virtual void endBackup(OperationContext* txn);
+ virtual void endBackup(OperationContext* opCtx);
virtual int64_t getIdentSize(OperationContext* opCtx, StringData ident);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
index 80b2344969c..d5a74f184ce 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
@@ -46,7 +46,7 @@ WiredTigerEngineRuntimeConfigParameter::WiredTigerEngineRuntimeConfigParameter(
_engine(engine) {}
-void WiredTigerEngineRuntimeConfigParameter::append(OperationContext* txn,
+void WiredTigerEngineRuntimeConfigParameter::append(OperationContext* opCtx,
BSONObjBuilder& b,
const std::string& name) {
b << name << "";
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h
index 6742f76be99..9bc4699794f 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h
@@ -41,7 +41,7 @@ class WiredTigerEngineRuntimeConfigParameter : public ServerParameter {
public:
explicit WiredTigerEngineRuntimeConfigParameter(WiredTigerKVEngine* engine);
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name);
+ virtual void append(OperationContext* opCtx, BSONObjBuilder& b, const std::string& name);
virtual Status set(const BSONElement& newValueElement);
virtual Status setFromString(const std::string& str);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 6a33378a070..a88754cf992 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -143,7 +143,7 @@ private:
OplogStones* _oplogStones;
};
-WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* txn, WiredTigerRecordStore* rs)
+WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* opCtx, WiredTigerRecordStore* rs)
: _rs(rs) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
@@ -159,7 +159,7 @@ WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* txn, WiredTige
_minBytesPerStone = maxSize / _numStonesToKeep;
invariant(_minBytesPerStone > 0);
- _calculateStones(txn);
+ _calculateStones(opCtx);
_pokeReclaimThreadIfNeeded(); // Reclaim stones if over the limit.
}
@@ -227,13 +227,16 @@ void WiredTigerRecordStore::OplogStones::createNewStoneIfNeeded(RecordId lastRec
}
void WiredTigerRecordStore::OplogStones::updateCurrentStoneAfterInsertOnCommit(
- OperationContext* txn, int64_t bytesInserted, RecordId highestInserted, int64_t countInserted) {
- txn->recoveryUnit()->registerChange(
+ OperationContext* opCtx,
+ int64_t bytesInserted,
+ RecordId highestInserted,
+ int64_t countInserted) {
+ opCtx->recoveryUnit()->registerChange(
new InsertChange(this, bytesInserted, highestInserted, countInserted));
}
-void WiredTigerRecordStore::OplogStones::clearStonesOnCommit(OperationContext* txn) {
- txn->recoveryUnit()->registerChange(new TruncateChange(this));
+void WiredTigerRecordStore::OplogStones::clearStonesOnCommit(OperationContext* opCtx) {
+ opCtx->recoveryUnit()->registerChange(new TruncateChange(this));
}
void WiredTigerRecordStore::OplogStones::updateStonesAfterCappedTruncateAfter(
@@ -285,9 +288,9 @@ void WiredTigerRecordStore::OplogStones::setNumStonesToKeep(size_t numStones) {
_numStonesToKeep = numStones;
}
-void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* txn) {
- long long numRecords = _rs->numRecords(txn);
- long long dataSize = _rs->dataSize(txn);
+void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* opCtx) {
+ long long numRecords = _rs->numRecords(opCtx);
+ long long dataSize = _rs->dataSize(opCtx);
log() << "The size storer reports that the oplog contains " << numRecords
<< " records totaling to " << dataSize << " bytes";
@@ -301,7 +304,7 @@ void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* txn)
if (numRecords <= 0 || dataSize <= 0 ||
uint64_t(numRecords) <
kMinSampleRatioForRandCursor * kRandomSamplesPerStone * _numStonesToKeep) {
- _calculateStonesByScanning(txn);
+ _calculateStonesByScanning(opCtx);
return;
}
@@ -311,16 +314,16 @@ void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* txn)
double estRecordsPerStone = std::ceil(_minBytesPerStone / avgRecordSize);
double estBytesPerStone = estRecordsPerStone * avgRecordSize;
- _calculateStonesBySampling(txn, int64_t(estRecordsPerStone), int64_t(estBytesPerStone));
+ _calculateStonesBySampling(opCtx, int64_t(estRecordsPerStone), int64_t(estBytesPerStone));
}
-void WiredTigerRecordStore::OplogStones::_calculateStonesByScanning(OperationContext* txn) {
+void WiredTigerRecordStore::OplogStones::_calculateStonesByScanning(OperationContext* opCtx) {
log() << "Scanning the oplog to determine where to place markers for truncation";
long long numRecords = 0;
long long dataSize = 0;
- auto cursor = _rs->getCursor(txn, true);
+ auto cursor = _rs->getCursor(opCtx, true);
while (auto record = cursor->next()) {
_currentRecords.addAndFetch(1);
int64_t newCurrentBytes = _currentBytes.addAndFetch(record->data.size());
@@ -336,10 +339,10 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesByScanning(OperationCon
dataSize += record->data.size();
}
- _rs->updateStatsAfterRepair(txn, numRecords, dataSize);
+ _rs->updateStatsAfterRepair(opCtx, numRecords, dataSize);
}
-void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationContext* txn,
+void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationContext* opCtx,
int64_t estRecordsPerStone,
int64_t estBytesPerStone) {
Timestamp earliestOpTime;
@@ -347,13 +350,13 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
{
const bool forward = true;
- auto cursor = _rs->getCursor(txn, forward);
+ auto cursor = _rs->getCursor(opCtx, forward);
auto record = cursor->next();
if (!record) {
// This shouldn't really happen unless the size storer values are far off from reality.
// The collection is probably empty, but fall back to scanning the oplog just in case.
log() << "Failed to determine the earliest optime, falling back to scanning the oplog";
- _calculateStonesByScanning(txn);
+ _calculateStonesByScanning(opCtx);
return;
}
earliestOpTime = Timestamp(record->id.repr());
@@ -361,13 +364,13 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
{
const bool forward = false;
- auto cursor = _rs->getCursor(txn, forward);
+ auto cursor = _rs->getCursor(opCtx, forward);
auto record = cursor->next();
if (!record) {
// This shouldn't really happen unless the size storer values are far off from reality.
// The collection is probably empty, but fall back to scanning the oplog just in case.
log() << "Failed to determine the latest optime, falling back to scanning the oplog";
- _calculateStonesByScanning(txn);
+ _calculateStonesByScanning(opCtx);
return;
}
latestOpTime = Timestamp(record->id.repr());
@@ -376,8 +379,8 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
log() << "Sampling from the oplog between " << earliestOpTime.toStringPretty() << " and "
<< latestOpTime.toStringPretty() << " to determine where to place markers for truncation";
- int64_t wholeStones = _rs->numRecords(txn) / estRecordsPerStone;
- int64_t numSamples = kRandomSamplesPerStone * _rs->numRecords(txn) / estRecordsPerStone;
+ int64_t wholeStones = _rs->numRecords(opCtx) / estRecordsPerStone;
+ int64_t numSamples = kRandomSamplesPerStone * _rs->numRecords(opCtx) / estRecordsPerStone;
log() << "Taking " << numSamples << " samples and assuming that each section of oplog contains"
<< " approximately " << estRecordsPerStone << " records totaling to " << estBytesPerStone
@@ -391,7 +394,7 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
// approximately 'estRecordsPerStone'. Do so by oversampling the oplog, sorting the samples in
// order of their RecordId, and then choosing the samples expected to be near the right edge of
// each logical section.
- auto cursor = _rs->getRandomCursorWithOptions(txn, extraConfig);
+ auto cursor = _rs->getRandomCursorWithOptions(opCtx, extraConfig);
std::vector<RecordId> oplogEstimates;
for (int i = 0; i < numSamples; ++i) {
auto record = cursor->next();
@@ -399,7 +402,7 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
// This shouldn't really happen unless the size storer values are far off from reality.
// The collection is probably empty, but fall back to scanning the oplog just in case.
log() << "Failed to get enough random samples, falling back to scanning the oplog";
- _calculateStonesByScanning(txn);
+ _calculateStonesByScanning(opCtx);
return;
}
oplogEstimates.push_back(record->id);
@@ -418,8 +421,8 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
}
// Account for the partially filled chunk.
- _currentRecords.store(_rs->numRecords(txn) - estRecordsPerStone * wholeStones);
- _currentBytes.store(_rs->dataSize(txn) - estBytesPerStone * wholeStones);
+ _currentRecords.store(_rs->numRecords(opCtx) - estRecordsPerStone * wholeStones);
+ _currentBytes.store(_rs->dataSize(opCtx) - estBytesPerStone * wholeStones);
}
void WiredTigerRecordStore::OplogStones::_pokeReclaimThreadIfNeeded() {
@@ -430,12 +433,12 @@ void WiredTigerRecordStore::OplogStones::_pokeReclaimThreadIfNeeded() {
class WiredTigerRecordStore::Cursor final : public SeekableRecordCursor {
public:
- Cursor(OperationContext* txn, const WiredTigerRecordStore& rs, bool forward = true)
+ Cursor(OperationContext* opCtx, const WiredTigerRecordStore& rs, bool forward = true)
: _rs(rs),
- _txn(txn),
+ _opCtx(opCtx),
_forward(forward),
- _readUntilForOplog(WiredTigerRecoveryUnit::get(txn)->getOplogReadTill()) {
- _cursor.emplace(rs.getURI(), rs.tableId(), true, txn);
+ _readUntilForOplog(WiredTigerRecoveryUnit::get(opCtx)->getOplogReadTill()) {
+ _cursor.emplace(rs.getURI(), rs.tableId(), true, opCtx);
}
boost::optional<Record> next() final {
@@ -519,10 +522,10 @@ public:
bool restore() final {
if (!_cursor)
- _cursor.emplace(_rs.getURI(), _rs.tableId(), true, _txn);
+ _cursor.emplace(_rs.getURI(), _rs.tableId(), true, _opCtx);
// This will ensure an active session exists, so any restored cursors will bind to it
- invariant(WiredTigerRecoveryUnit::get(_txn)->getSession(_txn) == _cursor->getSession());
+ invariant(WiredTigerRecoveryUnit::get(_opCtx)->getSession(_opCtx) == _cursor->getSession());
_skipNextAdvance = false;
// If we've hit EOF, then this iterator is done and need not be restored.
@@ -566,12 +569,12 @@ public:
}
void detachFromOperationContext() final {
- _txn = nullptr;
+ _opCtx = nullptr;
_cursor = boost::none;
}
- void reattachToOperationContext(OperationContext* txn) final {
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ _opCtx = opCtx;
// _cursor recreated in restore() to avoid risk of WT_ROLLBACK issues.
}
@@ -598,7 +601,7 @@ private:
}
const WiredTigerRecordStore& _rs;
- OperationContext* _txn;
+ OperationContext* _opCtx;
const bool _forward;
bool _skipNextAdvance = false;
boost::optional<WiredTigerCursor> _cursor;
@@ -629,8 +632,8 @@ StatusWith<std::string> WiredTigerRecordStore::parseOptionsField(const BSONObj o
class WiredTigerRecordStore::RandomCursor final : public RecordCursor {
public:
- RandomCursor(OperationContext* txn, const WiredTigerRecordStore& rs, StringData config)
- : _cursor(nullptr), _rs(&rs), _txn(txn), _config(config.toString() + ",next_random") {
+ RandomCursor(OperationContext* opCtx, const WiredTigerRecordStore& rs, StringData config)
+ : _cursor(nullptr), _rs(&rs), _opCtx(opCtx), _config(config.toString() + ",next_random") {
restore();
}
@@ -668,7 +671,7 @@ public:
bool restore() final {
// We can't use the CursorCache since this cursor needs a special config string.
- WT_SESSION* session = WiredTigerRecoveryUnit::get(_txn)->getSession(_txn)->getSession();
+ WT_SESSION* session = WiredTigerRecoveryUnit::get(_opCtx)->getSession(_opCtx)->getSession();
if (!_cursor) {
invariantWTOK(session->open_cursor(
@@ -678,22 +681,22 @@ public:
return true;
}
void detachFromOperationContext() final {
- invariant(_txn);
- _txn = nullptr;
+ invariant(_opCtx);
+ _opCtx = nullptr;
if (_cursor) {
invariantWTOK(_cursor->close(_cursor));
}
_cursor = nullptr;
}
- void reattachToOperationContext(OperationContext* txn) final {
- invariant(!_txn);
- _txn = txn;
+ void reattachToOperationContext(OperationContext* opCtx) final {
+ invariant(!_opCtx);
+ _opCtx = opCtx;
}
private:
WT_CURSOR* _cursor;
const WiredTigerRecordStore* _rs;
- OperationContext* _txn;
+ OperationContext* _opCtx;
const std::string _config;
};
@@ -878,11 +881,11 @@ bool WiredTigerRecordStore::inShutdown() const {
return _shuttingDown;
}
-long long WiredTigerRecordStore::dataSize(OperationContext* txn) const {
+long long WiredTigerRecordStore::dataSize(OperationContext* opCtx) const {
return _dataSize.load();
}
-long long WiredTigerRecordStore::numRecords(OperationContext* txn) const {
+long long WiredTigerRecordStore::numRecords(OperationContext* opCtx) const {
return _numRecords.load();
}
@@ -900,13 +903,13 @@ int64_t WiredTigerRecordStore::cappedMaxSize() const {
return _cappedMaxSize;
}
-int64_t WiredTigerRecordStore::storageSize(OperationContext* txn,
+int64_t WiredTigerRecordStore::storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo,
int infoLevel) const {
if (_isEphemeral) {
- return dataSize(txn);
+ return dataSize(opCtx);
}
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx);
StatusWith<int64_t> result =
WiredTigerUtil::getStatisticsValueAs<int64_t>(session->getSession(),
"statistics:" + getURI(),
@@ -934,9 +937,9 @@ RecordData WiredTigerRecordStore::_getData(const WiredTigerCursor& cursor) const
return RecordData(data, value.size);
}
-RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const RecordId& id) const {
+RecordData WiredTigerRecordStore::dataFor(OperationContext* opCtx, const RecordId& id) const {
// ownership passes to the shared_array created below
- WiredTigerCursor curwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, true, opCtx);
WT_CURSOR* c = curwrap.get();
invariant(c);
c->set_key(c, _makeKey(id));
@@ -946,10 +949,10 @@ RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const RecordId&
return _getData(curwrap);
}
-bool WiredTigerRecordStore::findRecord(OperationContext* txn,
+bool WiredTigerRecordStore::findRecord(OperationContext* opCtx,
const RecordId& id,
RecordData* out) const {
- WiredTigerCursor curwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, true, opCtx);
WT_CURSOR* c = curwrap.get();
invariant(c);
c->set_key(c, _makeKey(id));
@@ -962,12 +965,12 @@ bool WiredTigerRecordStore::findRecord(OperationContext* txn,
return true;
}
-void WiredTigerRecordStore::deleteRecord(OperationContext* txn, const RecordId& id) {
+void WiredTigerRecordStore::deleteRecord(OperationContext* opCtx, const RecordId& id) {
// Deletes should never occur on a capped collection because truncation uses
// WT_SESSION::truncate().
invariant(!isCapped());
- WiredTigerCursor cursor(_uri, _tableId, true, txn);
+ WiredTigerCursor cursor(_uri, _tableId, true, opCtx);
cursor.assertInActiveTxn();
WT_CURSOR* c = cursor.get();
c->set_key(c, _makeKey(id));
@@ -983,8 +986,8 @@ void WiredTigerRecordStore::deleteRecord(OperationContext* txn, const RecordId&
ret = WT_OP_CHECK(c->remove(c));
invariantWTOK(ret);
- _changeNumRecords(txn, -1);
- _increaseDataSize(txn, -old_length);
+ _changeNumRecords(opCtx, -1);
+ _increaseDataSize(opCtx, -old_length);
}
bool WiredTigerRecordStore::cappedAndNeedDelete() const {
@@ -1000,7 +1003,7 @@ bool WiredTigerRecordStore::cappedAndNeedDelete() const {
return false;
}
-int64_t WiredTigerRecordStore::cappedDeleteAsNeeded(OperationContext* txn,
+int64_t WiredTigerRecordStore::cappedDeleteAsNeeded(OperationContext* opCtx,
const RecordId& justInserted) {
invariant(!_oplogStones);
@@ -1040,20 +1043,20 @@ int64_t WiredTigerRecordStore::cappedDeleteAsNeeded(OperationContext* txn,
}
}
- return cappedDeleteAsNeeded_inlock(txn, justInserted);
+ return cappedDeleteAsNeeded_inlock(opCtx, justInserted);
}
-int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn,
+int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* opCtx,
const RecordId& justInserted) {
// we do this in a side transaction in case it aborts
WiredTigerRecoveryUnit* realRecoveryUnit =
- checked_cast<WiredTigerRecoveryUnit*>(txn->releaseRecoveryUnit());
+ checked_cast<WiredTigerRecoveryUnit*>(opCtx->releaseRecoveryUnit());
invariant(realRecoveryUnit);
WiredTigerSessionCache* sc = realRecoveryUnit->getSessionCache();
OperationContext::RecoveryUnitState const realRUstate =
- txn->setRecoveryUnit(new WiredTigerRecoveryUnit(sc), OperationContext::kNotInUnitOfWork);
+ opCtx->setRecoveryUnit(new WiredTigerRecoveryUnit(sc), OperationContext::kNotInUnitOfWork);
- WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
+ WT_SESSION* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx)->getSession();
int64_t dataSize = _dataSize.load();
int64_t numRecords = _numRecords.load();
@@ -1065,9 +1068,9 @@ int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn
docsOverCap = numRecords - _cappedMaxDocs;
try {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
- WiredTigerCursor curwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, true, opCtx);
WT_CURSOR* truncateEnd = curwrap.get();
RecordId newestIdToDelete;
int ret = 0;
@@ -1109,7 +1112,7 @@ int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn
stdx::lock_guard<stdx::mutex> cappedCallbackLock(_cappedCallbackMutex);
if (_cappedCallback) {
uassertStatusOK(_cappedCallback->aboutToDeleteCapped(
- txn,
+ opCtx,
newestIdToDelete,
RecordData(static_cast<const char*>(old_value.data), old_value.size)));
}
@@ -1136,7 +1139,7 @@ int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn
}
invariantWTOK(truncateEnd->prev(truncateEnd)); // put the cursor back where it was
- WiredTigerCursor startWrap(_uri, _tableId, true, txn);
+ WiredTigerCursor startWrap(_uri, _tableId, true, opCtx);
WT_CURSOR* truncateStart = startWrap.get();
// If we know where the start point is, set it for the truncate
@@ -1153,35 +1156,35 @@ int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn
docsRemoved = 0;
} else {
invariantWTOK(ret);
- _changeNumRecords(txn, -docsRemoved);
- _increaseDataSize(txn, -sizeSaved);
+ _changeNumRecords(opCtx, -docsRemoved);
+ _increaseDataSize(opCtx, -sizeSaved);
wuow.commit();
// Save the key for the next round
_cappedFirstRecord = firstRemainingId;
}
}
} catch (const WriteConflictException& wce) {
- delete txn->releaseRecoveryUnit();
- txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
+ delete opCtx->releaseRecoveryUnit();
+ opCtx->setRecoveryUnit(realRecoveryUnit, realRUstate);
log() << "got conflict truncating capped, ignoring";
return 0;
} catch (...) {
- delete txn->releaseRecoveryUnit();
- txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
+ delete opCtx->releaseRecoveryUnit();
+ opCtx->setRecoveryUnit(realRecoveryUnit, realRUstate);
throw;
}
- delete txn->releaseRecoveryUnit();
- txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
+ delete opCtx->releaseRecoveryUnit();
+ opCtx->setRecoveryUnit(realRecoveryUnit, realRUstate);
return docsRemoved;
}
-bool WiredTigerRecordStore::yieldAndAwaitOplogDeletionRequest(OperationContext* txn) {
+bool WiredTigerRecordStore::yieldAndAwaitOplogDeletionRequest(OperationContext* opCtx) {
// Create another reference to the oplog stones while holding a lock on the collection to
// prevent it from being destructed.
std::shared_ptr<OplogStones> oplogStones = _oplogStones;
- Locker* locker = txn->lockState();
+ Locker* locker = opCtx->lockState();
Locker::LockSnapshot snapshot;
// Release any locks before waiting on the condition variable. It is illegal to access any
@@ -1191,7 +1194,7 @@ bool WiredTigerRecordStore::yieldAndAwaitOplogDeletionRequest(OperationContext*
// The top-level locks were freed, so also release any potential low-level (storage engine)
// locks that might be held.
- txn->recoveryUnit()->abandonSnapshot();
+ opCtx->recoveryUnit()->abandonSnapshot();
// Wait for an oplog deletion request, or for this record store to have been destroyed.
oplogStones->awaitHasExcessStonesOrDead();
@@ -1202,7 +1205,7 @@ bool WiredTigerRecordStore::yieldAndAwaitOplogDeletionRequest(OperationContext*
return !oplogStones->isDead();
}
-void WiredTigerRecordStore::reclaimOplog(OperationContext* txn) {
+void WiredTigerRecordStore::reclaimOplog(OperationContext* opCtx) {
while (auto stone = _oplogStones->peekOldestStoneIfNeeded()) {
invariant(stone->lastRecord.isNormal());
@@ -1210,23 +1213,23 @@ void WiredTigerRecordStore::reclaimOplog(OperationContext* txn) {
<< stone->lastRecord << " to remove approximately " << stone->records
<< " records totaling to " << stone->bytes << " bytes";
- WiredTigerRecoveryUnit* ru = WiredTigerRecoveryUnit::get(txn);
- WT_SESSION* session = ru->getSession(txn)->getSession();
+ WiredTigerRecoveryUnit* ru = WiredTigerRecoveryUnit::get(opCtx);
+ WT_SESSION* session = ru->getSession(opCtx)->getSession();
try {
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
- WiredTigerCursor startwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor startwrap(_uri, _tableId, true, opCtx);
WT_CURSOR* start = startwrap.get();
start->set_key(start, _makeKey(_oplogStones->firstRecord));
- WiredTigerCursor endwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor endwrap(_uri, _tableId, true, opCtx);
WT_CURSOR* end = endwrap.get();
end->set_key(end, _makeKey(stone->lastRecord));
invariantWTOK(session->truncate(session, nullptr, start, end, nullptr));
- _changeNumRecords(txn, -stone->records);
- _increaseDataSize(txn, -stone->bytes);
+ _changeNumRecords(opCtx, -stone->records);
+ _increaseDataSize(opCtx, -stone->bytes);
wuow.commit();
@@ -1244,13 +1247,13 @@ void WiredTigerRecordStore::reclaimOplog(OperationContext* txn) {
<< " records totaling to " << _dataSize.load() << " bytes";
}
-Status WiredTigerRecordStore::insertRecords(OperationContext* txn,
+Status WiredTigerRecordStore::insertRecords(OperationContext* opCtx,
std::vector<Record>* records,
bool enforceQuota) {
- return _insertRecords(txn, records->data(), records->size());
+ return _insertRecords(opCtx, records->data(), records->size());
}
-Status WiredTigerRecordStore::_insertRecords(OperationContext* txn,
+Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx,
Record* records,
size_t nRecords) {
// We are kind of cheating on capped collections since we write all of them at once ....
@@ -1263,7 +1266,7 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* txn,
if (_isCapped && totalLength > _cappedMaxSize)
return Status(ErrorCodes::BadValue, "object to insert exceeds cappedMaxSize");
- WiredTigerCursor curwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, true, opCtx);
curwrap.assertInActiveTxn();
WT_CURSOR* c = curwrap.get();
invariant(c);
@@ -1281,7 +1284,7 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* txn,
} else if (_isCapped) {
stdx::lock_guard<stdx::mutex> lk(_uncommittedRecordIdsMutex);
record.id = _nextId();
- _addUncommittedRecordId_inlock(txn, record.id);
+ _addUncommittedRecordId_inlock(opCtx, record.id);
} else {
record.id = _nextId();
}
@@ -1305,24 +1308,25 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* txn,
return wtRCToStatus(ret, "WiredTigerRecordStore::insertRecord");
}
- _changeNumRecords(txn, nRecords);
- _increaseDataSize(txn, totalLength);
+ _changeNumRecords(opCtx, nRecords);
+ _increaseDataSize(opCtx, totalLength);
if (_oplogStones) {
- _oplogStones->updateCurrentStoneAfterInsertOnCommit(txn, totalLength, highestId, nRecords);
+ _oplogStones->updateCurrentStoneAfterInsertOnCommit(
+ opCtx, totalLength, highestId, nRecords);
} else {
- cappedDeleteAsNeeded(txn, highestId);
+ cappedDeleteAsNeeded(opCtx, highestId);
}
return Status::OK();
}
-StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* txn,
+StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota) {
Record record = {RecordId(), RecordData(data, len)};
- Status status = _insertRecords(txn, &record, 1);
+ Status status = _insertRecords(opCtx, &record, 1);
if (!status.isOK())
return StatusWith<RecordId>(status);
return StatusWith<RecordId>(record.id);
@@ -1362,7 +1366,7 @@ RecordId WiredTigerRecordStore::lowestCappedHiddenRecord() const {
return _uncommittedRecordIds.empty() ? RecordId() : _uncommittedRecordIds.front();
}
-Status WiredTigerRecordStore::insertRecordsWithDocWriter(OperationContext* txn,
+Status WiredTigerRecordStore::insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut) {
@@ -1388,7 +1392,7 @@ Status WiredTigerRecordStore::insertRecordsWithDocWriter(OperationContext* txn,
}
invariant(pos == (buffer.get() + totalSize));
- Status s = _insertRecords(txn, records.get(), nDocs);
+ Status s = _insertRecords(opCtx, records.get(), nDocs);
if (!s.isOK())
return s;
@@ -1401,13 +1405,13 @@ Status WiredTigerRecordStore::insertRecordsWithDocWriter(OperationContext* txn,
return s;
}
-Status WiredTigerRecordStore::updateRecord(OperationContext* txn,
+Status WiredTigerRecordStore::updateRecord(OperationContext* opCtx,
const RecordId& id,
const char* data,
int len,
bool enforceQuota,
UpdateNotifier* notifier) {
- WiredTigerCursor curwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor curwrap(_uri, _tableId, true, opCtx);
curwrap.assertInActiveTxn();
WT_CURSOR* c = curwrap.get();
invariant(c);
@@ -1431,9 +1435,9 @@ Status WiredTigerRecordStore::updateRecord(OperationContext* txn,
ret = WT_OP_CHECK(c->insert(c));
invariantWTOK(ret);
- _increaseDataSize(txn, len - old_length);
+ _increaseDataSize(opCtx, len - old_length);
if (!_oplogStones) {
- cappedDeleteAsNeeded(txn, id);
+ cappedDeleteAsNeeded(opCtx, id);
}
return Status::OK();
@@ -1444,7 +1448,7 @@ bool WiredTigerRecordStore::updateWithDamagesSupported() const {
}
StatusWith<RecordData> WiredTigerRecordStore::updateWithDamages(
- OperationContext* txn,
+ OperationContext* opCtx,
const RecordId& id,
const RecordData& oldRec,
const char* damageSource,
@@ -1461,41 +1465,42 @@ void WiredTigerRecordStore::_oplogSetStartHack(WiredTigerRecoveryUnit* wru) cons
}
}
-std::unique_ptr<SeekableRecordCursor> WiredTigerRecordStore::getCursor(OperationContext* txn,
+std::unique_ptr<SeekableRecordCursor> WiredTigerRecordStore::getCursor(OperationContext* opCtx,
bool forward) const {
if (_isOplog && forward) {
- WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(txn);
+ WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(opCtx);
// If we already have a snapshot we don't know what it can see, unless we know no one
// else could be writing (because we hold an exclusive lock).
- if (wru->inActiveTxn() && !txn->lockState()->isNoop() &&
- !txn->lockState()->isCollectionLockedForMode(_ns, MODE_X)) {
+ if (wru->inActiveTxn() && !opCtx->lockState()->isNoop() &&
+ !opCtx->lockState()->isCollectionLockedForMode(_ns, MODE_X)) {
throw WriteConflictException();
}
_oplogSetStartHack(wru);
}
- return stdx::make_unique<Cursor>(txn, *this, forward);
+ return stdx::make_unique<Cursor>(opCtx, *this, forward);
}
-std::unique_ptr<RecordCursor> WiredTigerRecordStore::getRandomCursor(OperationContext* txn) const {
+std::unique_ptr<RecordCursor> WiredTigerRecordStore::getRandomCursor(
+ OperationContext* opCtx) const {
const char* extraConfig = "";
- return getRandomCursorWithOptions(txn, extraConfig);
+ return getRandomCursorWithOptions(opCtx, extraConfig);
}
std::unique_ptr<RecordCursor> WiredTigerRecordStore::getRandomCursorWithOptions(
- OperationContext* txn, StringData extraConfig) const {
- return stdx::make_unique<RandomCursor>(txn, *this, extraConfig);
+ OperationContext* opCtx, StringData extraConfig) const {
+ return stdx::make_unique<RandomCursor>(opCtx, *this, extraConfig);
}
std::vector<std::unique_ptr<RecordCursor>> WiredTigerRecordStore::getManyCursors(
- OperationContext* txn) const {
+ OperationContext* opCtx) const {
std::vector<std::unique_ptr<RecordCursor>> cursors(1);
- cursors[0] = stdx::make_unique<Cursor>(txn, *this, /*forward=*/true);
+ cursors[0] = stdx::make_unique<Cursor>(opCtx, *this, /*forward=*/true);
return cursors;
}
-Status WiredTigerRecordStore::truncate(OperationContext* txn) {
- WiredTigerCursor startWrap(_uri, _tableId, true, txn);
+Status WiredTigerRecordStore::truncate(OperationContext* opCtx) {
+ WiredTigerCursor startWrap(_uri, _tableId, true, opCtx);
WT_CURSOR* start = startWrap.get();
int ret = WT_OP_CHECK(start->next(start));
// Empty collections don't have anything to truncate.
@@ -1504,23 +1509,23 @@ Status WiredTigerRecordStore::truncate(OperationContext* txn) {
}
invariantWTOK(ret);
- WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
+ WT_SESSION* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx)->getSession();
invariantWTOK(WT_OP_CHECK(session->truncate(session, NULL, start, NULL, NULL)));
- _changeNumRecords(txn, -numRecords(txn));
- _increaseDataSize(txn, -dataSize(txn));
+ _changeNumRecords(opCtx, -numRecords(opCtx));
+ _increaseDataSize(opCtx, -dataSize(opCtx));
if (_oplogStones) {
- _oplogStones->clearStonesOnCommit(txn);
+ _oplogStones->clearStonesOnCommit(opCtx);
}
return Status::OK();
}
-Status WiredTigerRecordStore::compact(OperationContext* txn,
+Status WiredTigerRecordStore::compact(OperationContext* opCtx,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* options,
CompactStats* stats) {
- WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(txn)->getSessionCache();
+ WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache();
if (!cache->isEphemeral()) {
UniqueWiredTigerSession session = cache->getSession();
WT_SESSION* s = session->getSession();
@@ -1530,13 +1535,13 @@ Status WiredTigerRecordStore::compact(OperationContext* txn,
return Status::OK();
}
-Status WiredTigerRecordStore::validate(OperationContext* txn,
+Status WiredTigerRecordStore::validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
BSONObjBuilder* output) {
if (!_isEphemeral) {
- int err = WiredTigerUtil::verifyTable(txn, _uri, &results->errors);
+ int err = WiredTigerUtil::verifyTable(opCtx, _uri, &results->errors);
if (err == EBUSY) {
const char* msg = "verify() returned EBUSY. Not treating as invalid.";
warning() << msg;
@@ -1558,12 +1563,12 @@ Status WiredTigerRecordStore::validate(OperationContext* txn,
long long nInvalid = 0;
results->valid = true;
- Cursor cursor(txn, *this, true);
+ Cursor cursor(opCtx, *this, true);
int interruptInterval = 4096;
while (auto record = cursor.next()) {
if (!(nrecords % interruptInterval))
- txn->checkForInterrupt();
+ opCtx->checkForInterrupt();
++nrecords;
auto dataSize = record->data.size();
dataSizeTotal += dataSize;
@@ -1606,7 +1611,7 @@ Status WiredTigerRecordStore::validate(OperationContext* txn,
return Status::OK();
}
-void WiredTigerRecordStore::appendCustomStats(OperationContext* txn,
+void WiredTigerRecordStore::appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const {
result->appendBool("capped", _isCapped);
@@ -1616,12 +1621,12 @@ void WiredTigerRecordStore::appendCustomStats(OperationContext* txn,
result->appendIntOrLL("sleepCount", _cappedSleep.load());
result->appendIntOrLL("sleepMS", _cappedSleepMS.load());
}
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx);
WT_SESSION* s = session->getSession();
BSONObjBuilder bob(result->subobjStart(_engineName));
{
BSONObjBuilder metadata(bob.subobjStart("metadata"));
- Status status = WiredTigerUtil::getApplicationMetadata(txn, getURI(), &metadata);
+ Status status = WiredTigerUtil::getApplicationMetadata(opCtx, getURI(), &metadata);
if (!status.isOK()) {
metadata.append("error", "unable to retrieve metadata");
metadata.append("code", static_cast<int>(status.code()));
@@ -1630,8 +1635,8 @@ void WiredTigerRecordStore::appendCustomStats(OperationContext* txn,
}
std::string type, sourceURI;
- WiredTigerUtil::fetchTypeAndSourceURI(txn, _uri, &type, &sourceURI);
- StatusWith<std::string> metadataResult = WiredTigerUtil::getMetadata(txn, sourceURI);
+ WiredTigerUtil::fetchTypeAndSourceURI(opCtx, _uri, &type, &sourceURI);
+ StatusWith<std::string> metadataResult = WiredTigerUtil::getMetadata(opCtx, sourceURI);
StringData creationStringName("creationString");
if (!metadataResult.isOK()) {
BSONObjBuilder creationString(bob.subobjStart(creationStringName));
@@ -1653,7 +1658,7 @@ void WiredTigerRecordStore::appendCustomStats(OperationContext* txn,
}
}
-Status WiredTigerRecordStore::touch(OperationContext* txn, BSONObjBuilder* output) const {
+Status WiredTigerRecordStore::touch(OperationContext* opCtx, BSONObjBuilder* output) const {
if (_isEphemeral) {
// Everything is already in memory.
return Status::OK();
@@ -1661,13 +1666,14 @@ Status WiredTigerRecordStore::touch(OperationContext* txn, BSONObjBuilder* outpu
return Status(ErrorCodes::CommandNotSupported, "this storage engine does not support touch");
}
-Status WiredTigerRecordStore::oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime) {
+Status WiredTigerRecordStore::oplogDiskLocRegister(OperationContext* opCtx,
+ const Timestamp& opTime) {
StatusWith<RecordId> id = oploghack::keyForOptime(opTime);
if (!id.isOK())
return id.getStatus();
stdx::lock_guard<stdx::mutex> lk(_uncommittedRecordIdsMutex);
- _addUncommittedRecordId_inlock(txn, id.getValue());
+ _addUncommittedRecordId_inlock(opCtx, id.getValue());
return Status::OK();
}
@@ -1733,38 +1739,38 @@ void WiredTigerRecordStore::_oplogJournalThreadLoop(WiredTigerSessionCache* sess
std::terminate();
}
-void WiredTigerRecordStore::waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const {
- invariant(txn->lockState()->isNoop() || !txn->lockState()->inAWriteUnitOfWork());
+void WiredTigerRecordStore::waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const {
+ invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork());
// This function must not start a WT transaction, otherwise we will get stuck in an infinite
// loop of WCE handling when the getCursor() is called.
stdx::unique_lock<stdx::mutex> lk(_uncommittedRecordIdsMutex);
const auto waitingFor = _oplog_highestSeen;
- txn->waitForConditionOrInterrupt(_opsBecameVisibleCV, lk, [&] {
+ opCtx->waitForConditionOrInterrupt(_opsBecameVisibleCV, lk, [&] {
return _uncommittedRecordIds.empty() || _uncommittedRecordIds.front() > waitingFor;
});
}
-void WiredTigerRecordStore::_addUncommittedRecordId_inlock(OperationContext* txn, RecordId id) {
+void WiredTigerRecordStore::_addUncommittedRecordId_inlock(OperationContext* opCtx, RecordId id) {
dassert(_uncommittedRecordIds.empty() || _uncommittedRecordIds.back() < id);
SortedRecordIds::iterator it = _uncommittedRecordIds.insert(_uncommittedRecordIds.end(), id);
invariant(it->isNormal());
- txn->recoveryUnit()->registerChange(new CappedInsertChange(this, it));
+ opCtx->recoveryUnit()->registerChange(new CappedInsertChange(this, it));
_oplog_highestSeen = id;
}
boost::optional<RecordId> WiredTigerRecordStore::oplogStartHack(
- OperationContext* txn, const RecordId& startingPosition) const {
+ OperationContext* opCtx, const RecordId& startingPosition) const {
if (!_useOplogHack)
return boost::none;
{
- WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(txn);
+ WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(opCtx);
_oplogSetStartHack(wru);
}
- WiredTigerCursor cursor(_uri, _tableId, true, txn);
+ WiredTigerCursor cursor(_uri, _tableId, true, opCtx);
WT_CURSOR* c = cursor.get();
int cmp;
@@ -1782,7 +1788,7 @@ boost::optional<RecordId> WiredTigerRecordStore::oplogStartHack(
return _fromKey(key);
}
-void WiredTigerRecordStore::updateStatsAfterRepair(OperationContext* txn,
+void WiredTigerRecordStore::updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize) {
_numRecords.store(numRecords);
@@ -1800,8 +1806,8 @@ RecordId WiredTigerRecordStore::_nextId() {
return out;
}
-WiredTigerRecoveryUnit* WiredTigerRecordStore::_getRecoveryUnit(OperationContext* txn) {
- return checked_cast<WiredTigerRecoveryUnit*>(txn->recoveryUnit());
+WiredTigerRecoveryUnit* WiredTigerRecordStore::_getRecoveryUnit(OperationContext* opCtx) {
+ return checked_cast<WiredTigerRecoveryUnit*>(opCtx->recoveryUnit());
}
class WiredTigerRecordStore::NumRecordsChange : public RecoveryUnit::Change {
@@ -1817,8 +1823,8 @@ private:
int64_t _diff;
};
-void WiredTigerRecordStore::_changeNumRecords(OperationContext* txn, int64_t diff) {
- txn->recoveryUnit()->registerChange(new NumRecordsChange(this, diff));
+void WiredTigerRecordStore::_changeNumRecords(OperationContext* opCtx, int64_t diff) {
+ opCtx->recoveryUnit()->registerChange(new NumRecordsChange(this, diff));
if (_numRecords.fetchAndAdd(diff) < 0)
_numRecords.store(std::max(diff, int64_t(0)));
}
@@ -1836,9 +1842,9 @@ private:
int64_t _amount;
};
-void WiredTigerRecordStore::_increaseDataSize(OperationContext* txn, int64_t amount) {
- if (txn)
- txn->recoveryUnit()->registerChange(new DataSizeChange(this, amount));
+void WiredTigerRecordStore::_increaseDataSize(OperationContext* opCtx, int64_t amount) {
+ if (opCtx)
+ opCtx->recoveryUnit()->registerChange(new DataSizeChange(this, amount));
if (_dataSize.fetchAndAdd(amount) < 0)
_dataSize.store(std::max(amount, int64_t(0)));
@@ -1855,10 +1861,10 @@ RecordId WiredTigerRecordStore::_fromKey(int64_t key) {
return RecordId(key);
}
-void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* txn,
+void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* opCtx,
RecordId end,
bool inclusive) {
- Cursor cursor(txn, *this);
+ Cursor cursor(opCtx, *this);
auto record = cursor.seekExact(end);
massert(28807, str::stream() << "Failed to seek to the record located at " << end, record);
@@ -1869,7 +1875,7 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* txn,
RecordId firstRemovedId;
if (inclusive) {
- Cursor reverseCursor(txn, *this, false);
+ Cursor reverseCursor(opCtx, *this, false);
invariant(reverseCursor.seekExact(end));
auto prev = reverseCursor.next();
lastKeptId = prev ? prev->id : RecordId();
@@ -1891,7 +1897,7 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* txn,
do {
if (_cappedCallback) {
uassertStatusOK(
- _cappedCallback->aboutToDeleteCapped(txn, record->id, record->data));
+ _cappedCallback->aboutToDeleteCapped(opCtx, record->id, record->data));
}
recordsRemoved++;
bytesRemoved += record->data.size();
@@ -1900,17 +1906,17 @@ void WiredTigerRecordStore::cappedTruncateAfter(OperationContext* txn,
// Truncate the collection starting from the record located at 'firstRemovedId' to the end of
// the collection.
- WriteUnitOfWork wuow(txn);
+ WriteUnitOfWork wuow(opCtx);
- WiredTigerCursor startwrap(_uri, _tableId, true, txn);
+ WiredTigerCursor startwrap(_uri, _tableId, true, opCtx);
WT_CURSOR* start = startwrap.get();
start->set_key(start, _makeKey(firstRemovedId));
- WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
+ WT_SESSION* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx)->getSession();
invariantWTOK(session->truncate(session, nullptr, start, nullptr, nullptr));
- _changeNumRecords(txn, -recordsRemoved);
- _increaseDataSize(txn, -bytesRemoved);
+ _changeNumRecords(opCtx, -recordsRemoved);
+ _increaseDataSize(opCtx, -bytesRemoved);
wuow.commit();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 50360324e75..131797e5fb5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -86,7 +86,7 @@ public:
const CollectionOptions& options,
StringData extraStrings);
- WiredTigerRecordStore(OperationContext* txn,
+ WiredTigerRecordStore(OperationContext* opCtx,
StringData ns,
StringData uri,
std::string engineName,
@@ -102,39 +102,39 @@ public:
// name of the RecordStore implementation
virtual const char* name() const;
- virtual long long dataSize(OperationContext* txn) const;
+ virtual long long dataSize(OperationContext* opCtx) const;
- virtual long long numRecords(OperationContext* txn) const;
+ virtual long long numRecords(OperationContext* opCtx) const;
virtual bool isCapped() const;
- virtual int64_t storageSize(OperationContext* txn,
+ virtual int64_t storageSize(OperationContext* opCtx,
BSONObjBuilder* extraInfo = NULL,
int infoLevel = 0) const;
// CRUD related
- virtual RecordData dataFor(OperationContext* txn, const RecordId& id) const;
+ virtual RecordData dataFor(OperationContext* opCtx, const RecordId& id) const;
- virtual bool findRecord(OperationContext* txn, const RecordId& id, RecordData* out) const;
+ virtual bool findRecord(OperationContext* opCtx, const RecordId& id, RecordData* out) const;
- virtual void deleteRecord(OperationContext* txn, const RecordId& id);
+ virtual void deleteRecord(OperationContext* opCtx, const RecordId& id);
- virtual Status insertRecords(OperationContext* txn,
+ virtual Status insertRecords(OperationContext* opCtx,
std::vector<Record>* records,
bool enforceQuota);
- virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ virtual StatusWith<RecordId> insertRecord(OperationContext* opCtx,
const char* data,
int len,
bool enforceQuota);
- virtual Status insertRecordsWithDocWriter(OperationContext* txn,
+ virtual Status insertRecordsWithDocWriter(OperationContext* opCtx,
const DocWriter* const* docs,
size_t nDocs,
RecordId* idsOut);
- virtual Status updateRecord(OperationContext* txn,
+ virtual Status updateRecord(OperationContext* opCtx,
const RecordId& oldLocation,
const char* data,
int len,
@@ -143,22 +143,22 @@ public:
virtual bool updateWithDamagesSupported() const;
- virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn,
+ virtual StatusWith<RecordData> updateWithDamages(OperationContext* opCtx,
const RecordId& id,
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages);
- std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* txn,
+ std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward) const final;
- std::unique_ptr<RecordCursor> getRandomCursor(OperationContext* txn) const final;
+ std::unique_ptr<RecordCursor> getRandomCursor(OperationContext* opCtx) const final;
- std::unique_ptr<RecordCursor> getRandomCursorWithOptions(OperationContext* txn,
+ std::unique_ptr<RecordCursor> getRandomCursorWithOptions(OperationContext* opCtx,
StringData extraConfig) const;
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const final;
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* opCtx) const final;
- virtual Status truncate(OperationContext* txn);
+ virtual Status truncate(OperationContext* opCtx);
virtual bool compactSupported() const {
return !_isEphemeral;
@@ -167,36 +167,36 @@ public:
return true;
}
- virtual Status compact(OperationContext* txn,
+ virtual Status compact(OperationContext* opCtx,
RecordStoreCompactAdaptor* adaptor,
const CompactOptions* options,
CompactStats* stats);
- virtual Status validate(OperationContext* txn,
+ virtual Status validate(OperationContext* opCtx,
ValidateCmdLevel level,
ValidateAdaptor* adaptor,
ValidateResults* results,
BSONObjBuilder* output);
- virtual void appendCustomStats(OperationContext* txn,
+ virtual void appendCustomStats(OperationContext* opCtx,
BSONObjBuilder* result,
double scale) const;
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
+ virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const;
- virtual void cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive);
+ virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive);
- virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
+ virtual boost::optional<RecordId> oplogStartHack(OperationContext* opCtx,
const RecordId& startingPosition) const;
- virtual Status oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime);
+ virtual Status oplogDiskLocRegister(OperationContext* opCtx, const Timestamp& opTime);
- virtual void updateStatsAfterRepair(OperationContext* txn,
+ virtual void updateStatsAfterRepair(OperationContext* opCtx,
long long numRecords,
long long dataSize);
- void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const override;
+ void waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCtx) const override;
bool isOplog() const {
return _isOplog;
@@ -229,18 +229,18 @@ public:
bool inShutdown() const;
- void reclaimOplog(OperationContext* txn);
+ void reclaimOplog(OperationContext* opCtx);
- int64_t cappedDeleteAsNeeded(OperationContext* txn, const RecordId& justInserted);
+ int64_t cappedDeleteAsNeeded(OperationContext* opCtx, const RecordId& justInserted);
- int64_t cappedDeleteAsNeeded_inlock(OperationContext* txn, const RecordId& justInserted);
+ int64_t cappedDeleteAsNeeded_inlock(OperationContext* opCtx, const RecordId& justInserted);
boost::timed_mutex& cappedDeleterMutex() { // NOLINT
return _cappedDeleterMutex;
}
// Returns false if the oplog was dropped while waiting for a deletion request.
- bool yieldAndAwaitOplogDeletionRequest(OperationContext* txn);
+ bool yieldAndAwaitOplogDeletionRequest(OperationContext* opCtx);
class OplogStones;
@@ -257,21 +257,21 @@ private:
class NumRecordsChange;
class DataSizeChange;
- static WiredTigerRecoveryUnit* _getRecoveryUnit(OperationContext* txn);
+ static WiredTigerRecoveryUnit* _getRecoveryUnit(OperationContext* opCtx);
static int64_t _makeKey(const RecordId& id);
static RecordId _fromKey(int64_t k);
void _dealtWithCappedId(SortedRecordIds::iterator it, bool didCommit);
- void _addUncommittedRecordId_inlock(OperationContext* txn, RecordId id);
+ void _addUncommittedRecordId_inlock(OperationContext* opCtx, RecordId id);
- Status _insertRecords(OperationContext* txn, Record* records, size_t nRecords);
+ Status _insertRecords(OperationContext* opCtx, Record* records, size_t nRecords);
RecordId _nextId();
void _setId(RecordId id);
bool cappedAndNeedDelete() const;
- void _changeNumRecords(OperationContext* txn, int64_t diff);
- void _increaseDataSize(OperationContext* txn, int64_t amount);
+ void _changeNumRecords(OperationContext* opCtx, int64_t diff);
+ void _increaseDataSize(OperationContext* opCtx, int64_t amount);
RecordData _getData(const WiredTigerCursor& cursor) const;
void _oplogSetStartHack(WiredTigerRecoveryUnit* wru) const;
void _oplogJournalThreadLoop(WiredTigerSessionCache* sessionCache);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
index 1d0606ee9de..ef85a84bb4f 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
@@ -77,34 +77,34 @@ public:
return false;
}
- const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
- OperationContext& txn = *txnPtr;
+ const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
+ OperationContext& opCtx = *opCtxPtr;
try {
- ScopedTransaction transaction(&txn, MODE_IX);
+ ScopedTransaction transaction(&opCtx, MODE_IX);
- AutoGetDb autoDb(&txn, _ns.db(), MODE_IX);
+ AutoGetDb autoDb(&opCtx, _ns.db(), MODE_IX);
Database* db = autoDb.getDb();
if (!db) {
LOG(2) << "no local database yet";
return false;
}
- Lock::CollectionLock collectionLock(txn.lockState(), _ns.ns(), MODE_IX);
+ Lock::CollectionLock collectionLock(opCtx.lockState(), _ns.ns(), MODE_IX);
Collection* collection = db->getCollection(_ns);
if (!collection) {
LOG(2) << "no collection " << _ns;
return false;
}
- OldClientContext ctx(&txn, _ns.ns(), false);
+ OldClientContext ctx(&opCtx, _ns.ns(), false);
WiredTigerRecordStore* rs =
checked_cast<WiredTigerRecordStore*>(collection->getRecordStore());
- if (!rs->yieldAndAwaitOplogDeletionRequest(&txn)) {
+ if (!rs->yieldAndAwaitOplogDeletionRequest(&opCtx)) {
return false; // Oplog went away.
}
- rs->reclaimOplog(&txn);
+ rs->reclaimOplog(&opCtx);
} catch (const std::exception& e) {
severe() << "error in WiredTigerRecordStoreThread: " << e.what();
fassertFailedNoTrace(!"error in WiredTigerRecordStoreThread");
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
index daa8998af2a..f42ade90db8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
@@ -51,7 +51,7 @@ public:
RecordId lastRecord; // RecordId of the last record in a chunk of the oplog.
};
- OplogStones(OperationContext* txn, WiredTigerRecordStore* rs);
+ OplogStones(OperationContext* opCtx, WiredTigerRecordStore* rs);
bool isDead();
@@ -69,12 +69,12 @@ public:
void createNewStoneIfNeeded(RecordId lastRecord);
- void updateCurrentStoneAfterInsertOnCommit(OperationContext* txn,
+ void updateCurrentStoneAfterInsertOnCommit(OperationContext* opCtx,
int64_t bytesInserted,
RecordId highestInserted,
int64_t countInserted);
- void clearStonesOnCommit(OperationContext* txn);
+ void clearStonesOnCommit(OperationContext* opCtx);
// Updates the metadata about the oplog stones after a rollback occurs.
void updateStonesAfterCappedTruncateAfter(int64_t recordsRemoved,
@@ -110,9 +110,9 @@ private:
class InsertChange;
class TruncateChange;
- void _calculateStones(OperationContext* txn);
- void _calculateStonesByScanning(OperationContext* txn);
- void _calculateStonesBySampling(OperationContext* txn,
+ void _calculateStones(OperationContext* opCtx);
+ void _calculateStonesByScanning(OperationContext* opCtx);
+ void _calculateStonesBySampling(OperationContext* opCtx,
int64_t estRecordsPerStone,
int64_t estBytesPerStone);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index f8e8604d31b..e47072cbe58 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -96,7 +96,7 @@ public:
}
std::unique_ptr<RecordStore> newNonCappedRecordStore(const std::string& ns) {
WiredTigerRecoveryUnit* ru = new WiredTigerRecoveryUnit(_sessionCache);
- OperationContextNoop txn(ru);
+ OperationContextNoop opCtx(ru);
string uri = "table:" + ns;
StatusWith<std::string> result = WiredTigerRecordStore::generateCreateString(
@@ -105,14 +105,14 @@ public:
std::string config = result.getValue();
{
- WriteUnitOfWork uow(&txn);
- WT_SESSION* s = ru->getSession(&txn)->getSession();
+ WriteUnitOfWork uow(&opCtx);
+ WT_SESSION* s = ru->getSession(&opCtx)->getSession();
invariantWTOK(s->create(s, uri.c_str(), config.c_str()));
uow.commit();
}
return stdx::make_unique<WiredTigerRecordStore>(
- &txn, ns, uri, kWiredTigerEngineName, false, false);
+ &opCtx, ns, uri, kWiredTigerEngineName, false, false);
}
std::unique_ptr<RecordStore> newCappedRecordStore(int64_t cappedSizeBytes,
@@ -124,7 +124,7 @@ public:
int64_t cappedMaxSize,
int64_t cappedMaxDocs) {
WiredTigerRecoveryUnit* ru = new WiredTigerRecoveryUnit(_sessionCache);
- OperationContextNoop txn(ru);
+ OperationContextNoop opCtx(ru);
string uri = "table:a.b";
CollectionOptions options;
@@ -136,14 +136,14 @@ public:
std::string config = result.getValue();
{
- WriteUnitOfWork uow(&txn);
- WT_SESSION* s = ru->getSession(&txn)->getSession();
+ WriteUnitOfWork uow(&opCtx);
+ WT_SESSION* s = ru->getSession(&opCtx)->getSession();
invariantWTOK(s->create(s, uri.c_str(), config.c_str()));
uow.commit();
}
return stdx::make_unique<WiredTigerRecordStore>(
- &txn, ns, uri, kWiredTigerEngineName, true, false, cappedMaxSize, cappedMaxDocs);
+ &opCtx, ns, uri, kWiredTigerEngineName, true, false, cappedMaxSize, cappedMaxDocs);
}
std::unique_ptr<RecoveryUnit> newRecoveryUnit() final {
@@ -782,13 +782,15 @@ TEST(WiredTigerRecordStoreTest, CappedCursorRollover) {
ASSERT(!cursor->next());
}
-RecordId _oplogOrderInsertOplog(OperationContext* txn, const unique_ptr<RecordStore>& rs, int inc) {
+RecordId _oplogOrderInsertOplog(OperationContext* opCtx,
+ const unique_ptr<RecordStore>& rs,
+ int inc) {
Timestamp opTime = Timestamp(5, inc);
WiredTigerRecordStore* wrs = checked_cast<WiredTigerRecordStore*>(rs.get());
- Status status = wrs->oplogDiskLocRegister(txn, opTime);
+ Status status = wrs->oplogDiskLocRegister(opCtx, opTime);
ASSERT_OK(status);
BSONObj obj = BSON("ts" << opTime);
- StatusWith<RecordId> res = rs->insertRecord(txn, obj.objdata(), obj.objsize(), false);
+ StatusWith<RecordId> res = rs->insertRecord(opCtx, obj.objdata(), obj.objsize(), false);
ASSERT_OK(res.getStatus());
return res.getValue();
}
@@ -879,8 +881,8 @@ TEST(WiredTigerRecordStoreTest, OplogOrder) {
// the visibility rules aren't violated. See SERVER-21645
{
auto client2 = harnessHelper->serviceContext()->makeClient("c2");
- auto txn = harnessHelper->newOperationContext(client2.get());
- rs->cappedTruncateAfter(txn.get(), id1, /*inclusive*/ false);
+ auto opCtx = harnessHelper->newOperationContext(client2.get());
+ rs->cappedTruncateAfter(opCtx.get(), id1, /*inclusive*/ false);
}
{
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index 483e0aa4de5..69f81c96e4b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -209,7 +209,7 @@ void WiredTigerRecoveryUnit::_txnClose(bool commit) {
}
SnapshotId WiredTigerRecoveryUnit::getSnapshotId() const {
- // TODO: use actual wiredtiger txn id
+ // TODO: use actual wiredtiger transaction id
return SnapshotId(_mySnapshotId);
}
@@ -257,10 +257,10 @@ void WiredTigerRecoveryUnit::_txnOpen(OperationContext* opCtx) {
WiredTigerCursor::WiredTigerCursor(const std::string& uri,
uint64_t tableId,
bool forRecordStore,
- OperationContext* txn) {
+ OperationContext* opCtx) {
_tableID = tableId;
- _ru = WiredTigerRecoveryUnit::get(txn);
- _session = _ru->getSession(txn);
+ _ru = WiredTigerRecoveryUnit::get(opCtx);
+ _session = _ru->getSession(opCtx);
_cursor = _session->getCursor(uri, tableId, forRecordStore);
if (!_cursor) {
error() << "no cursor for uri: " << uri;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
index 48e7a31f200..5822630b004 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
@@ -82,7 +82,7 @@ public:
WiredTigerSession* getSession(OperationContext* opCtx);
/**
- * Returns a session without starting a new WT txn on the session. Will not close any already
+ * Returns a session without starting a new WT transaction on the session. Will not close any already
* running session.
*/
@@ -101,8 +101,8 @@ public:
return _oplogReadTill;
}
- static WiredTigerRecoveryUnit* get(OperationContext* txn) {
- return checked_cast<WiredTigerRecoveryUnit*>(txn->recoveryUnit());
+ static WiredTigerRecoveryUnit* get(OperationContext* opCtx) {
+ return checked_cast<WiredTigerRecoveryUnit*>(opCtx->recoveryUnit());
}
static void appendGlobalStats(BSONObjBuilder& b);
@@ -146,7 +146,7 @@ public:
WiredTigerCursor(const std::string& uri,
uint64_t tableID,
bool forRecordStore,
- OperationContext* txn);
+ OperationContext* opCtx);
~WiredTigerCursor();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp
index 4967dfc2f86..0fc7cea7be7 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp
@@ -54,12 +54,12 @@ bool WiredTigerServerStatusSection::includeByDefault() const {
return true;
}
-BSONObj WiredTigerServerStatusSection::generateSection(OperationContext* txn,
+BSONObj WiredTigerServerStatusSection::generateSection(OperationContext* opCtx,
const BSONElement& configElement) const {
// The session does not open a transaction here as one is not needed and opening one would
// mean that execution could become blocked when a new transaction cannot be allocated
// immediately.
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSessionNoTxn(txn);
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSessionNoTxn(opCtx);
invariant(session);
WT_SESSION* s = session->getSession();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h
index 5e7c3b3e8a1..d724abee7a3 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h
@@ -43,7 +43,8 @@ class WiredTigerServerStatusSection : public ServerStatusSection {
public:
WiredTigerServerStatusSection(WiredTigerKVEngine* engine);
virtual bool includeByDefault() const;
- virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const;
+ virtual BSONObj generateSection(OperationContext* opCtx,
+ const BSONElement& configElement) const;
private:
WiredTigerKVEngine* _engine;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
index da0f618f1bd..ea42dac59d0 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp
@@ -41,13 +41,14 @@
namespace mongo {
-Status WiredTigerSnapshotManager::prepareForCreateSnapshot(OperationContext* txn) {
- WiredTigerRecoveryUnit::get(txn)->prepareForCreateSnapshot(txn);
+Status WiredTigerSnapshotManager::prepareForCreateSnapshot(OperationContext* opCtx) {
+ WiredTigerRecoveryUnit::get(opCtx)->prepareForCreateSnapshot(opCtx);
return Status::OK();
}
-Status WiredTigerSnapshotManager::createSnapshot(OperationContext* txn, const SnapshotName& name) {
- auto session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
+Status WiredTigerSnapshotManager::createSnapshot(OperationContext* opCtx,
+ const SnapshotName& name) {
+ auto session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx)->getSession();
const std::string config = str::stream() << "name=" << name.asU64();
return wtRCToStatus(session->snapshot(session, config.c_str()));
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
index 29763557fe3..d885df0c863 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h
@@ -51,7 +51,7 @@ public:
shutdown();
}
- Status prepareForCreateSnapshot(OperationContext* txn) final;
+ Status prepareForCreateSnapshot(OperationContext* opCtx) final;
Status createSnapshot(OperationContext* ru, const SnapshotName& name) final;
void setCommittedSnapshot(const SnapshotName& name) final;
void cleanupUnneededSnapshots() final;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index 0f784c367ab..d92e3e66875 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -419,18 +419,18 @@ int WiredTigerUtil::ErrorAccumulator::onError(WT_EVENT_HANDLER* handler,
}
}
-int WiredTigerUtil::verifyTable(OperationContext* txn,
+int WiredTigerUtil::verifyTable(OperationContext* opCtx,
const std::string& uri,
std::vector<std::string>* errors) {
ErrorAccumulator eventHandler(errors);
// Try to close as much as possible to avoid EBUSY errors.
- WiredTigerRecoveryUnit::get(txn)->getSession(txn)->closeAllCursors();
- WiredTigerSessionCache* sessionCache = WiredTigerRecoveryUnit::get(txn)->getSessionCache();
+ WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx)->closeAllCursors();
+ WiredTigerSessionCache* sessionCache = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache();
sessionCache->closeAllCursors();
// Open a new session with custom error handlers.
- WT_CONNECTION* conn = WiredTigerRecoveryUnit::get(txn)->getSessionCache()->conn();
+ WT_CONNECTION* conn = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache()->conn();
WT_SESSION* session;
invariantWTOK(conn->open_session(conn, &eventHandler, NULL, &session));
ON_BLOCK_EXIT(session->close, session, "");
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.h b/src/mongo/db/storage/wiredtiger/wiredtiger_util.h
index 33673f6e652..248ac7b8450 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.h
@@ -214,7 +214,7 @@ public:
*
* If errors is non-NULL, all error messages will be appended to the array.
*/
- static int verifyTable(OperationContext* txn,
+ static int verifyTable(OperationContext* opCtx,
const std::string& uri,
std::vector<std::string>* errors = NULL);