author    | Martin Bligh <mbligh@mongodb.com> | 2015-09-21 17:57:44 -0400
committer | Martin Bligh <mbligh@mongodb.com> | 2015-09-21 17:57:44 -0400
commit    | dfcfb29ed6456aa1b2a7f4082c49ca4c93d91cee (patch)
tree      | fd35fed706c564af9b259a6458a1d57022bbd307 /src
parent    | c101632305f47619cd8f75429fe19621b36b727b (diff)
download  | mongo-dfcfb29ed6456aa1b2a7f4082c49ca4c93d91cee.tar.gz
SERVER-20549: Cleanup naming of loc vs id in WiredTiger
Diffstat (limited to 'src')
7 files changed, 188 insertions, 195 deletions
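For orientation before the diff: the unique-index code touched below stores, for each key, one or more (RecordId, TypeBits) pairs packed into a single WiredTiger value, and isDup()/_insert() treat a key as a duplicate only when the incoming RecordId is not already among the stored ones. The following is a minimal, self-contained C++ sketch of that membership-and-ordered-insert rule; it uses a plain std::vector<int64_t> as a stand-in for the KeyString-encoded value buffer, and the names PackedIds, isDupForKey, and insertIdForKey are illustrative, not from the MongoDB sources.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for the ids packed into one unique-index value, kept in RecordId order.
using PackedIds = std::vector<int64_t>;

// Mirrors the isDup() rule in the diff: a key is a duplicate only if it already
// exists AND the incoming id is not one of the ids stored under it.
bool isDupForKey(const PackedIds& packed, int64_t id) {
    return !packed.empty() && !std::binary_search(packed.begin(), packed.end(), id);
}

// Mirrors the _insert() loop: keep ids sorted, do nothing if the id is already
// present for this key, otherwise splice it in at the right position.
bool insertIdForKey(PackedIds& packed, int64_t id) {
    auto it = std::lower_bound(packed.begin(), packed.end(), id);
    if (it != packed.end() && *it == id)
        return false;  // already in the index for this key
    packed.insert(it, id);
    return true;
}

int main() {
    PackedIds packed;
    insertIdForKey(packed, 7);
    insertIdForKey(packed, 3);                    // dupsAllowed path: both ids kept, sorted
    std::cout << isDupForKey(packed, 3) << '\n';  // 0: id 3 already stored, not a dup
    std::cout << isDupForKey(packed, 9) << '\n';  // 1: key exists with other ids only
}
```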
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp index 06c3a7720f7..7a861b76b66 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp @@ -227,9 +227,9 @@ WiredTigerIndex::WiredTigerIndex(OperationContext* ctx, Status WiredTigerIndex::insert(OperationContext* txn, const BSONObj& key, - const RecordId& loc, + const RecordId& id, bool dupsAllowed) { - invariant(loc.isNormal()); + invariant(id.isNormal()); dassert(!hasFieldNames(key)); Status s = checkKeySize(key); @@ -240,14 +240,14 @@ Status WiredTigerIndex::insert(OperationContext* txn, curwrap.assertInActiveTxn(); WT_CURSOR* c = curwrap.get(); - return _insert(c, key, loc, dupsAllowed); + return _insert(c, key, id, dupsAllowed); } void WiredTigerIndex::unindex(OperationContext* txn, const BSONObj& key, - const RecordId& loc, + const RecordId& id, bool dupsAllowed) { - invariant(loc.isNormal()); + invariant(id.isNormal()); dassert(!hasFieldNames(key)); WiredTigerCursor curwrap(_uri, _tableId, false, txn); @@ -255,7 +255,7 @@ void WiredTigerIndex::unindex(OperationContext* txn, WT_CURSOR* c = curwrap.get(); invariant(c); - _unindex(c, key, loc, dupsAllowed); + _unindex(c, key, id, dupsAllowed); } void WiredTigerIndex::fullValidate(OperationContext* txn, @@ -353,16 +353,14 @@ bool WiredTigerIndex::appendCustomStats(OperationContext* txn, return true; } -Status WiredTigerIndex::dupKeyCheck(OperationContext* txn, - const BSONObj& key, - const RecordId& loc) { +Status WiredTigerIndex::dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& id) { invariant(!hasFieldNames(key)); invariant(unique()); WiredTigerCursor curwrap(_uri, _tableId, false, txn); WT_CURSOR* c = curwrap.get(); - if (isDup(c, key, loc)) + if (isDup(c, key, id)) return dupKeyError(key); return Status::OK(); } @@ -384,7 +382,7 @@ long long WiredTigerIndex::getSpaceUsedBytes(OperationContext* txn) const { return static_cast<long long>(WiredTigerUtil::getIdentSize(session->getSession(), _uri)); } -bool WiredTigerIndex::isDup(WT_CURSOR* c, const BSONObj& key, const RecordId& loc) { +bool WiredTigerIndex::isDup(WT_CURSOR* c, const BSONObj& key, const RecordId& id) { invariant(unique()); // First check whether the key exists. KeyString data(key, _ordering); @@ -396,13 +394,13 @@ bool WiredTigerIndex::isDup(WT_CURSOR* c, const BSONObj& key, const RecordId& lo } invariantWTOK(ret); - // If the key exists, check if we already have this loc at this key. If so, we don't + // If the key exists, check if we already have this id at this key. If so, we don't // consider that to be a dup. WT_ITEM value; invariantWTOK(c->get_value(c, &value)); BufReader br(value.data, value.size); while (br.remaining()) { - if (KeyString::decodeRecordId(&br) == loc) + if (KeyString::decodeRecordId(&br) == id) return false; KeyString::TypeBits::fromBuffer(&br); // Just calling this to advance reader. @@ -469,14 +467,14 @@ public: StandardBulkBuilder(WiredTigerIndex* idx, OperationContext* txn) : BulkBuilder(idx, txn), _idx(idx) {} - Status addKey(const BSONObj& key, const RecordId& loc) { + Status addKey(const BSONObj& key, const RecordId& id) { { const Status s = checkKeySize(key); if (!s.isOK()) return s; } - KeyString data(key, _idx->_ordering, loc); + KeyString data(key, _idx->_ordering, id); // Can't use WiredTigerCursor since we aren't using the cache. 
WiredTigerItem item(data.getBuffer(), data.getSize()); @@ -509,7 +507,7 @@ private: * * In order to support unique indexes in dupsAllowed mode this class only does an actual insert * after it sees a key after the one we are trying to insert. This allows us to gather up all - * duplicate locs and insert them all together. This is necessary since bulk cursors can only + * duplicate ids and insert them all together. This is necessary since bulk cursors can only * append data. */ class WiredTigerIndex::UniqueBulkBuilder : public BulkBuilder { @@ -517,7 +515,7 @@ public: UniqueBulkBuilder(WiredTigerIndex* idx, OperationContext* txn, bool dupsAllowed) : BulkBuilder(idx, txn), _idx(idx), _dupsAllowed(dupsAllowed) {} - Status addKey(const BSONObj& newKey, const RecordId& loc) { + Status addKey(const BSONObj& newKey, const RecordId& id) { { const Status s = checkKeySize(newKey); if (!s.isOK()) @@ -539,13 +537,13 @@ public: } // If we get here, we are in the weird mode where dups are allowed on a unique - // index, so add ourselves to the list of duplicate locs. This also replaces the + // index, so add ourselves to the list of duplicate ids. This also replaces the // _key which is correct since any dups seen later are likely to be newer. } _key = newKey.getOwned(); _keyString.resetToKey(_key, _idx->ordering()); - _records.push_back(std::make_pair(loc, _keyString.getTypeBits())); + _records.push_back(std::make_pair(id, _keyString.getTypeBits())); return Status::OK(); } @@ -702,23 +700,23 @@ public: protected: // Called after _key has been filled in. Must not throw WriteConflictException. - virtual void updateLocAndTypeBits() = 0; + virtual void updateIdAndTypeBits() = 0; boost::optional<IndexKeyEntry> curr(RequestedInfo parts) const { if (_eof) return {}; dassert(!atOrPastEndPointAfterSeeking()); - dassert(!_loc.isNull()); + dassert(!_id.isNull()); BSONObj bson; if (TRACING_ENABLED || (parts & kWantKey)) { bson = KeyString::toBson(_key.getBuffer(), _key.getSize(), _idx.ordering(), _typeBits); - TRACE_CURSOR << " returning " << bson << ' ' << _loc; + TRACE_CURSOR << " returning " << bson << ' ' << _id; } - return {{std::move(bson), _loc}}; + return {{std::move(bson), _id}}; } bool atOrPastEndPointAfterSeeking() const { @@ -795,7 +793,7 @@ protected: _lastMoveWasRestore = false; if (_cursorAtEof) { _eof = true; - _loc = RecordId(); + _id = RecordId(); return; } @@ -811,7 +809,7 @@ protected: return; } - updateLocAndTypeBits(); + updateIdAndTypeBits(); } OperationContext* _txn; @@ -823,7 +821,7 @@ protected: // next(). KeyString _key; KeyString::TypeBits _typeBits; - RecordId _loc; + RecordId _id; bool _eof = false; // This differs from _eof in that it always reflects the result of the most recent call to @@ -844,8 +842,8 @@ public: WiredTigerIndexStandardCursor(const WiredTigerIndex& idx, OperationContext* txn, bool forward) : WiredTigerIndexCursorBase(idx, txn, forward) {} - void updateLocAndTypeBits() override { - _loc = KeyString::decodeRecordIdAtEnd(_key.getBuffer(), _key.getSize()); + void updateIdAndTypeBits() override { + _id = KeyString::decodeRecordIdAtEnd(_key.getBuffer(), _key.getSize()); WT_CURSOR* c = _cursor->get(); WT_ITEM item; @@ -863,10 +861,10 @@ public: void restore() override { WiredTigerIndexCursorBase::restore(); - // In addition to seeking to the correct key, we also need to make sure that the loc is - // on the correct side of _loc. + // In addition to seeking to the correct key, we also need to make sure that the id is + // on the correct side of _id. 
if (_lastMoveWasRestore) - return; // We are on a different key so no need to check loc. + return; // We are on a different key so no need to check id. if (_eof) return; @@ -877,22 +875,22 @@ public: invariantWTOK(c->get_value(c, &item)); BufReader br(item.data, item.size); - RecordId locInIndex = KeyString::decodeRecordId(&br); + RecordId idInIndex = KeyString::decodeRecordId(&br); TRACE_CURSOR << "restore" - << " _loc:" << _loc << " locInIndex:" << locInIndex; + << " _id:" << _id << " idInIndex:" << idInIndex; - if (locInIndex == _loc) + if (idInIndex == _id) return; _lastMoveWasRestore = true; - if (_forward && (locInIndex < _loc)) + if (_forward && (idInIndex < _id)) advanceWTCursor(); - if (!_forward && (locInIndex > _loc)) + if (!_forward && (idInIndex > _id)) advanceWTCursor(); } - void updateLocAndTypeBits() override { + void updateIdAndTypeBits() override { // We assume that cursors can only ever see unique indexes in their "pristine" state, // where no duplicates are possible. The cases where dups are allowed should hold // sufficient locks to ensure that no cursor ever sees them. @@ -901,7 +899,7 @@ public: invariantWTOK(c->get_value(c, &item)); BufReader br(item.data, item.size); - _loc = KeyString::decodeRecordId(&br); + _id = KeyString::decodeRecordId(&br); _typeBits.resetFromBuffer(&br); if (!br.atEof()) { @@ -948,12 +946,12 @@ SortedDataBuilderInterface* WiredTigerIndexUnique::getBulkBuilder(OperationConte Status WiredTigerIndexUnique::_insert(WT_CURSOR* c, const BSONObj& key, - const RecordId& loc, + const RecordId& id, bool dupsAllowed) { const KeyString data(key, _ordering); WiredTigerItem keyItem(data.getBuffer(), data.getSize()); - KeyString value(loc); + KeyString value(id); if (!data.getTypeBits().isAllZeros()) value.appendTypeBits(data.getTypeBits()); @@ -968,7 +966,7 @@ Status WiredTigerIndexUnique::_insert(WT_CURSOR* c, // we might be in weird mode where there might be multiple values // we put them all in the "list" - // Note that we can't omit AllZeros when there are multiple locs for a value. When we remove + // Note that we can't omit AllZeros when there are multiple ids for a value. When we remove // down to a single value, it will be cleaned up. 
ret = WT_OP_CHECK(c->search(c)); invariantWTOK(ret); @@ -976,32 +974,32 @@ Status WiredTigerIndexUnique::_insert(WT_CURSOR* c, WT_ITEM old; invariantWTOK(c->get_value(c, &old)); - bool insertedLoc = false; + bool insertedId = false; value.resetToEmpty(); BufReader br(old.data, old.size); while (br.remaining()) { - RecordId locInIndex = KeyString::decodeRecordId(&br); - if (loc == locInIndex) + RecordId idInIndex = KeyString::decodeRecordId(&br); + if (id == idInIndex) return Status::OK(); // already in index - if (!insertedLoc && loc < locInIndex) { - value.appendRecordId(loc); + if (!insertedId && id < idInIndex) { + value.appendRecordId(id); value.appendTypeBits(data.getTypeBits()); - insertedLoc = true; + insertedId = true; } // Copy from old to new value - value.appendRecordId(locInIndex); + value.appendRecordId(idInIndex); value.appendTypeBits(KeyString::TypeBits::fromBuffer(&br)); } if (!dupsAllowed) return dupKeyError(key); - if (!insertedLoc) { - // This loc is higher than all currently in the index for this key - value.appendRecordId(loc); + if (!insertedId) { + // This id is higher than all currently in the index for this key + value.appendRecordId(id); value.appendTypeBits(data.getTypeBits()); } @@ -1012,7 +1010,7 @@ Status WiredTigerIndexUnique::_insert(WT_CURSOR* c, void WiredTigerIndexUnique::_unindex(WT_CURSOR* c, const BSONObj& key, - const RecordId& loc, + const RecordId& id, bool dupsAllowed) { KeyString data(key, _ordering); WiredTigerItem keyItem(data.getBuffer(), data.getSize()); @@ -1038,35 +1036,35 @@ void WiredTigerIndexUnique::_unindex(WT_CURSOR* c, WT_ITEM old; invariantWTOK(c->get_value(c, &old)); - bool foundLoc = false; + bool foundId = false; std::vector<std::pair<RecordId, KeyString::TypeBits>> records; BufReader br(old.data, old.size); while (br.remaining()) { - RecordId locInIndex = KeyString::decodeRecordId(&br); + RecordId idInIndex = KeyString::decodeRecordId(&br); KeyString::TypeBits typeBits = KeyString::TypeBits::fromBuffer(&br); - if (loc == locInIndex) { + if (id == idInIndex) { if (records.empty() && !br.remaining()) { - // This is the common case: we are removing the only loc for this key. + // This is the common case: we are removing the only id for this key. // Remove the whole entry. invariantWTOK(WT_OP_CHECK(c->remove(c))); return; } - foundLoc = true; + foundId = true; continue; } - records.push_back(std::make_pair(locInIndex, typeBits)); + records.push_back(std::make_pair(idInIndex, typeBits)); } - if (!foundLoc) { - warning().stream() << loc << " not found in the index for key " << key; + if (!foundId) { + warning().stream() << id << " not found in the index for key " << key; return; // nothing to do } - // Put other locs for this key back in the index. + // Put other ids for this key back in the index. 
KeyString newValue; invariant(!records.empty()); for (size_t i = 0; i < records.size(); i++) { @@ -1104,13 +1102,13 @@ SortedDataBuilderInterface* WiredTigerIndexStandard::getBulkBuilder(OperationCon Status WiredTigerIndexStandard::_insert(WT_CURSOR* c, const BSONObj& keyBson, - const RecordId& loc, + const RecordId& id, bool dupsAllowed) { invariant(dupsAllowed); - TRACE_INDEX << " key: " << keyBson << " loc: " << loc; + TRACE_INDEX << " key: " << keyBson << " id: " << id; - KeyString key(keyBson, _ordering, loc); + KeyString key(keyBson, _ordering, id); WiredTigerItem keyItem(key.getBuffer(), key.getSize()); WiredTigerItem valueItem = key.getTypeBits().isAllZeros() @@ -1131,10 +1129,10 @@ Status WiredTigerIndexStandard::_insert(WT_CURSOR* c, void WiredTigerIndexStandard::_unindex(WT_CURSOR* c, const BSONObj& key, - const RecordId& loc, + const RecordId& id, bool dupsAllowed) { invariant(dupsAllowed); - KeyString data(key, _ordering, loc); + KeyString data(key, _ordering, id); WiredTigerItem item(data.getBuffer(), data.getSize()); c->set_key(c, item.Get()); int ret = WT_OP_CHECK(c->remove(c)); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h index b499b95f64a..c72a12f89d9 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h @@ -80,12 +80,12 @@ public: virtual Status insert(OperationContext* txn, const BSONObj& key, - const RecordId& loc, + const RecordId& id, bool dupsAllowed); virtual void unindex(OperationContext* txn, const BSONObj& key, - const RecordId& loc, + const RecordId& id, bool dupsAllowed); virtual void fullValidate(OperationContext* txn, @@ -95,13 +95,13 @@ public: virtual bool appendCustomStats(OperationContext* txn, BSONObjBuilder* output, double scale) const; - virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc); + virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& id); virtual bool isEmpty(OperationContext* txn); virtual long long getSpaceUsedBytes(OperationContext* txn) const; - bool isDup(WT_CURSOR* c, const BSONObj& key, const RecordId& loc); + bool isDup(WT_CURSOR* c, const BSONObj& key, const RecordId& id); virtual Status initAsEmpty(OperationContext* txn); @@ -123,12 +123,12 @@ public: protected: virtual Status _insert(WT_CURSOR* c, const BSONObj& key, - const RecordId& loc, + const RecordId& id, bool dupsAllowed) = 0; virtual void _unindex(WT_CURSOR* c, const BSONObj& key, - const RecordId& loc, + const RecordId& id, bool dupsAllowed) = 0; class BulkBuilder; @@ -158,12 +158,9 @@ public: return true; } - Status _insert(WT_CURSOR* c, - const BSONObj& key, - const RecordId& loc, - bool dupsAllowed) override; + Status _insert(WT_CURSOR* c, const BSONObj& key, const RecordId& id, bool dupsAllowed) override; - void _unindex(WT_CURSOR* c, const BSONObj& key, const RecordId& loc, bool dupsAllowed) override; + void _unindex(WT_CURSOR* c, const BSONObj& key, const RecordId& id, bool dupsAllowed) override; }; class WiredTigerIndexStandard : public WiredTigerIndex { @@ -181,12 +178,9 @@ public: return false; } - Status _insert(WT_CURSOR* c, - const BSONObj& key, - const RecordId& loc, - bool dupsAllowed) override; + Status _insert(WT_CURSOR* c, const BSONObj& key, const RecordId& id, bool dupsAllowed) override; - void _unindex(WT_CURSOR* c, const BSONObj& key, const RecordId& loc, bool dupsAllowed) override; + void _unindex(WT_CURSOR* c, const BSONObj& key, const 
RecordId& id, bool dupsAllowed) override; }; } // namespace diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp index 9dcbbde07a9..3ab83810442 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp @@ -869,12 +869,12 @@ RecordData WiredTigerRecordStore::_getData(const WiredTigerCursor& cursor) const return RecordData(data, value.size); } -RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const RecordId& loc) const { +RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const RecordId& id) const { // ownership passes to the shared_array created below WiredTigerCursor curwrap(_uri, _tableId, true, txn); WT_CURSOR* c = curwrap.get(); invariant(c); - c->set_key(c, _makeKey(loc)); + c->set_key(c, _makeKey(id)); int ret = WT_OP_CHECK(c->search(c)); massert(28556, "Didn't find RecordId in WiredTigerRecordStore", ret != WT_NOTFOUND); invariantWTOK(ret); @@ -882,12 +882,12 @@ RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const RecordId& } bool WiredTigerRecordStore::findRecord(OperationContext* txn, - const RecordId& loc, + const RecordId& id, RecordData* out) const { WiredTigerCursor curwrap(_uri, _tableId, true, txn); WT_CURSOR* c = curwrap.get(); invariant(c); - c->set_key(c, _makeKey(loc)); + c->set_key(c, _makeKey(id)); int ret = WT_OP_CHECK(c->search(c)); if (ret == WT_NOTFOUND) { return false; @@ -897,7 +897,7 @@ bool WiredTigerRecordStore::findRecord(OperationContext* txn, return true; } -void WiredTigerRecordStore::deleteRecord(OperationContext* txn, const RecordId& loc) { +void WiredTigerRecordStore::deleteRecord(OperationContext* txn, const RecordId& id) { // Deletes should never occur on a capped collection because truncation uses // WT_SESSION::truncate(). 
invariant(!isCapped()); @@ -905,7 +905,7 @@ void WiredTigerRecordStore::deleteRecord(OperationContext* txn, const RecordId& WiredTigerCursor cursor(_uri, _tableId, true, txn); cursor.assertInActiveTxn(); WT_CURSOR* c = cursor.get(); - c->set_key(c, _makeKey(loc)); + c->set_key(c, _makeKey(id)); int ret = WT_OP_CHECK(c->search(c)); invariantWTOK(ret); @@ -1166,7 +1166,7 @@ Status WiredTigerRecordStore::insertRecords(OperationContext* txn, WT_CURSOR* c = curwrap.get(); invariant(c); - RecordId highestLoc = RecordId(); + RecordId highestId = RecordId(); dassert(!records->empty()); for (auto& record : *records) { if (_useOplogHack) { @@ -1176,20 +1176,20 @@ Status WiredTigerRecordStore::insertRecords(OperationContext* txn, return status.getStatus(); record.id = status.getValue(); } else if (_isCapped) { - stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex); + stdx::lock_guard<stdx::mutex> lk(_uncommittedRecordIdsMutex); record.id = _nextId(); - _addUncommitedDiskLoc_inlock(txn, record.id); + _addUncommitedRecordId_inlock(txn, record.id); } else { record.id = _nextId(); } - dassert(record.id > highestLoc); - highestLoc = record.id; + dassert(record.id > highestId); + highestId = record.id; } - if (_useOplogHack && (highestLoc > _oplog_highestSeen)) { - stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex); - if (highestLoc > _oplog_highestSeen) - _oplog_highestSeen = highestLoc; + if (_useOplogHack && (highestId > _oplog_highestSeen)) { + stdx::lock_guard<stdx::mutex> lk(_uncommittedRecordIdsMutex); + if (highestId > _oplog_highestSeen) + _oplog_highestSeen = highestId; } for (auto& record : *records) { @@ -1206,9 +1206,9 @@ Status WiredTigerRecordStore::insertRecords(OperationContext* txn, if (_oplogStones) { _oplogStones->updateCurrentStoneAfterInsertOnCommit( - txn, totalLength, highestLoc, records->size()); + txn, totalLength, highestId, records->size()); } else { - cappedDeleteAsNeeded(txn, highestLoc); + cappedDeleteAsNeeded(txn, highestId); } return Status::OK(); @@ -1228,25 +1228,25 @@ StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* txn, } -void WiredTigerRecordStore::_dealtWithCappedLoc(const RecordId& loc) { - stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex); - SortedDiskLocs::iterator it = - std::find(_uncommittedDiskLocs.begin(), _uncommittedDiskLocs.end(), loc); - invariant(it != _uncommittedDiskLocs.end()); - _uncommittedDiskLocs.erase(it); +void WiredTigerRecordStore::_dealtWithCappedId(const RecordId& id) { + stdx::lock_guard<stdx::mutex> lk(_uncommittedRecordIdsMutex); + SortedRecordIds::iterator it = + std::find(_uncommittedRecordIds.begin(), _uncommittedRecordIds.end(), id); + invariant(it != _uncommittedRecordIds.end()); + _uncommittedRecordIds.erase(it); } -bool WiredTigerRecordStore::isCappedHidden(const RecordId& loc) const { - stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex); - if (_uncommittedDiskLocs.empty()) { +bool WiredTigerRecordStore::isCappedHidden(const RecordId& id) const { + stdx::lock_guard<stdx::mutex> lk(_uncommittedRecordIdsMutex); + if (_uncommittedRecordIds.empty()) { return false; } - return _uncommittedDiskLocs.front() <= loc; + return _uncommittedRecordIds.front() <= id; } RecordId WiredTigerRecordStore::lowestCappedHiddenRecord() const { - stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex); - return _uncommittedDiskLocs.empty() ? RecordId() : _uncommittedDiskLocs.front(); + stdx::lock_guard<stdx::mutex> lk(_uncommittedRecordIdsMutex); + return _uncommittedRecordIds.empty() ? 
RecordId() : _uncommittedRecordIds.front(); } StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* txn, @@ -1261,7 +1261,7 @@ StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* txn, } StatusWith<RecordId> WiredTigerRecordStore::updateRecord(OperationContext* txn, - const RecordId& loc, + const RecordId& id, const char* data, int len, bool enforceQuota, @@ -1270,7 +1270,7 @@ StatusWith<RecordId> WiredTigerRecordStore::updateRecord(OperationContext* txn, curwrap.assertInActiveTxn(); WT_CURSOR* c = curwrap.get(); invariant(c); - c->set_key(c, _makeKey(loc)); + c->set_key(c, _makeKey(id)); int ret = WT_OP_CHECK(c->search(c)); invariantWTOK(ret); @@ -1284,7 +1284,7 @@ StatusWith<RecordId> WiredTigerRecordStore::updateRecord(OperationContext* txn, return {ErrorCodes::IllegalOperation, "Cannot change the size of a document in the oplog"}; } - c->set_key(c, _makeKey(loc)); + c->set_key(c, _makeKey(id)); WiredTigerItem value(data, len); c->set_value(c, value.Get()); ret = WT_OP_CHECK(c->insert(c)); @@ -1292,10 +1292,10 @@ StatusWith<RecordId> WiredTigerRecordStore::updateRecord(OperationContext* txn, _increaseDataSize(txn, len - old_length); if (!_oplogStones) { - cappedDeleteAsNeeded(txn, loc); + cappedDeleteAsNeeded(txn, id); } - return StatusWith<RecordId>(loc); + return StatusWith<RecordId>(id); } bool WiredTigerRecordStore::updateWithDamagesSupported() const { @@ -1304,7 +1304,7 @@ bool WiredTigerRecordStore::updateWithDamagesSupported() const { StatusWith<RecordData> WiredTigerRecordStore::updateWithDamages( OperationContext* txn, - const RecordId& loc, + const RecordId& id, const RecordData& oldRec, const char* damageSource, const mutablebson::DamageVector& damages) { @@ -1312,11 +1312,11 @@ StatusWith<RecordData> WiredTigerRecordStore::updateWithDamages( } void WiredTigerRecordStore::_oplogSetStartHack(WiredTigerRecoveryUnit* wru) const { - stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex); - if (_uncommittedDiskLocs.empty()) { + stdx::lock_guard<stdx::mutex> lk(_uncommittedRecordIdsMutex); + if (_uncommittedRecordIds.empty()) { wru->setOplogReadTill(_oplog_highestSeen); } else { - wru->setOplogReadTill(_uncommittedDiskLocs.front()); + wru->setOplogReadTill(_uncommittedRecordIds.front()); } } @@ -1501,44 +1501,45 @@ void WiredTigerRecordStore::appendCustomStats(OperationContext* txn, } } -Status WiredTigerRecordStore::oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime) { - StatusWith<RecordId> loc = oploghack::keyForOptime(opTime); - if (!loc.isOK()) - return loc.getStatus(); +Status WiredTigerRecordStore::oplogRecordIdRegister(OperationContext* txn, + const Timestamp& opTime) { + StatusWith<RecordId> id = oploghack::keyForOptime(opTime); + if (!id.isOK()) + return id.getStatus(); - stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex); - _addUncommitedDiskLoc_inlock(txn, loc.getValue()); + stdx::lock_guard<stdx::mutex> lk(_uncommittedRecordIdsMutex); + _addUncommitedRecordId_inlock(txn, id.getValue()); return Status::OK(); } class WiredTigerRecordStore::CappedInsertChange : public RecoveryUnit::Change { public: - CappedInsertChange(WiredTigerRecordStore* rs, const RecordId& loc) : _rs(rs), _loc(loc) {} + CappedInsertChange(WiredTigerRecordStore* rs, const RecordId& id) : _rs(rs), _id(id) {} virtual void commit() { // Do not notify here because all committed inserts notify, always. 
- _rs->_dealtWithCappedLoc(_loc); + _rs->_dealtWithCappedId(_id); } virtual void rollback() { // Notify on rollback since it might make later commits visible. - _rs->_dealtWithCappedLoc(_loc); + _rs->_dealtWithCappedId(_id); if (_rs->_cappedCallback) _rs->_cappedCallback->notifyCappedWaitersIfNeeded(); } private: WiredTigerRecordStore* _rs; - RecordId _loc; + RecordId _id; }; -void WiredTigerRecordStore::_addUncommitedDiskLoc_inlock(OperationContext* txn, - const RecordId& loc) { +void WiredTigerRecordStore::_addUncommitedRecordId_inlock(OperationContext* txn, + const RecordId& id) { // todo: make this a dassert at some point - // invariant(_uncommittedDiskLocs.empty() || _uncommittedDiskLocs.back() < loc); - _uncommittedDiskLocs.push_back(loc); - txn->recoveryUnit()->registerChange(new CappedInsertChange(this, loc)); - _oplog_highestSeen = loc; + // invariant(_uncommittedRecordIds.empty() || _uncommittedRecordIds.back() < id); + _uncommittedRecordIds.push_back(id); + txn->recoveryUnit()->registerChange(new CappedInsertChange(this, id)); + _oplog_highestSeen = id; } boost::optional<RecordId> WiredTigerRecordStore::oplogStartHack( @@ -1635,8 +1636,8 @@ void WiredTigerRecordStore::_increaseDataSize(OperationContext* txn, int64_t amo } } -int64_t WiredTigerRecordStore::_makeKey(const RecordId& loc) { - return loc.repr(); +int64_t WiredTigerRecordStore::_makeKey(const RecordId& id) { + return id.repr(); } RecordId WiredTigerRecordStore::_fromKey(int64_t key) { return RecordId(key); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h index 15d39b99ee3..ad274327574 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h @@ -108,11 +108,11 @@ public: // CRUD related - virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const; + virtual RecordData dataFor(OperationContext* txn, const RecordId& id) const; - virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* out) const; + virtual bool findRecord(OperationContext* txn, const RecordId& id, RecordData* out) const; - virtual void deleteRecord(OperationContext* txn, const RecordId& dl); + virtual void deleteRecord(OperationContext* txn, const RecordId& id); virtual Status insertRecords(OperationContext* txn, std::vector<Record>* records, @@ -137,7 +137,7 @@ public: virtual bool updateWithDamagesSupported() const; virtual StatusWith<RecordData> updateWithDamages(OperationContext* txn, - const RecordId& loc, + const RecordId& id, const RecordData& oldRec, const char* damageSource, const mutablebson::DamageVector& damages); @@ -178,7 +178,7 @@ public: virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn, const RecordId& startingPosition) const; - virtual Status oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime); + virtual Status oplogRecordIdRegister(OperationContext* txn, const Timestamp& opTime); virtual void updateStatsAfterRepair(OperationContext* txn, long long numRecords, @@ -208,7 +208,7 @@ public: _sizeStorer = ss; } - bool isCappedHidden(const RecordId& loc) const; + bool isCappedHidden(const RecordId& id) const; RecordId lowestCappedHiddenRecord() const; bool inShutdown() const; @@ -243,14 +243,14 @@ private: static WiredTigerRecoveryUnit* _getRecoveryUnit(OperationContext* txn); - static int64_t _makeKey(const RecordId& loc); + static int64_t _makeKey(const RecordId& id); static RecordId _fromKey(int64_t k); 
- void _dealtWithCappedLoc(const RecordId& loc); - void _addUncommitedDiskLoc_inlock(OperationContext* txn, const RecordId& loc); + void _dealtWithCappedId(const RecordId& id); + void _addUncommitedRecordId_inlock(OperationContext* txn, const RecordId& id); RecordId _nextId(); - void _setId(RecordId loc); + void _setId(RecordId id); bool cappedAndNeedDelete() const; void _changeNumRecords(OperationContext* txn, int64_t diff); void _increaseDataSize(OperationContext* txn, int64_t amount); @@ -277,11 +277,11 @@ private: const bool _useOplogHack; - typedef std::vector<RecordId> SortedDiskLocs; - SortedDiskLocs _uncommittedDiskLocs; + typedef std::vector<RecordId> SortedRecordIds; + SortedRecordIds _uncommittedRecordIds; RecordId _oplog_visibleTo; RecordId _oplog_highestSeen; - mutable stdx::mutex _uncommittedDiskLocsMutex; + mutable stdx::mutex _uncommittedRecordIdsMutex; AtomicInt64 _nextIdNum; AtomicInt64 _dataSize; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp index f6f685a7017..f34d6b8294d 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp @@ -208,8 +208,8 @@ TEST(WiredTigerRecordStoreTest, Isolation1) { unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore()); - RecordId loc1; - RecordId loc2; + RecordId id1; + RecordId id2; { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); @@ -218,11 +218,11 @@ TEST(WiredTigerRecordStoreTest, Isolation1) { StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "a", 2, false); ASSERT_OK(res.getStatus()); - loc1 = res.getValue(); + id1 = res.getValue(); res = rs->insertRecord(opCtx.get(), "a", 2, false); ASSERT_OK(res.getStatus()); - loc2 = res.getValue(); + id2 = res.getValue(); uow.commit(); } @@ -235,15 +235,15 @@ TEST(WiredTigerRecordStoreTest, Isolation1) { unique_ptr<WriteUnitOfWork> w1(new WriteUnitOfWork(t1.get())); unique_ptr<WriteUnitOfWork> w2(new WriteUnitOfWork(t2.get())); - rs->dataFor(t1.get(), loc1); - rs->dataFor(t2.get(), loc1); + rs->dataFor(t1.get(), id1); + rs->dataFor(t2.get(), id1); - ASSERT_OK(rs->updateRecord(t1.get(), loc1, "b", 2, false, NULL).getStatus()); - ASSERT_OK(rs->updateRecord(t1.get(), loc2, "B", 2, false, NULL).getStatus()); + ASSERT_OK(rs->updateRecord(t1.get(), id1, "b", 2, false, NULL).getStatus()); + ASSERT_OK(rs->updateRecord(t1.get(), id2, "B", 2, false, NULL).getStatus()); try { // this should fail - rs->updateRecord(t2.get(), loc1, "c", 2, false, NULL); + rs->updateRecord(t2.get(), id1, "c", 2, false, NULL); ASSERT(0); } catch (WriteConflictException& dle) { w2.reset(NULL); @@ -258,8 +258,8 @@ TEST(WiredTigerRecordStoreTest, Isolation2) { unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore()); - RecordId loc1; - RecordId loc2; + RecordId id1; + RecordId id2; { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); @@ -268,11 +268,11 @@ TEST(WiredTigerRecordStoreTest, Isolation2) { StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "a", 2, false); ASSERT_OK(res.getStatus()); - loc1 = res.getValue(); + id1 = res.getValue(); res = rs->insertRecord(opCtx.get(), "a", 2, false); ASSERT_OK(res.getStatus()); - loc2 = res.getValue(); + id2 = res.getValue(); uow.commit(); } @@ -283,21 +283,21 @@ TEST(WiredTigerRecordStoreTest, 
Isolation2) { unique_ptr<OperationContext> t2(harnessHelper->newOperationContext()); // ensure we start transactions - rs->dataFor(t1.get(), loc2); - rs->dataFor(t2.get(), loc2); + rs->dataFor(t1.get(), id2); + rs->dataFor(t2.get(), id2); { WriteUnitOfWork w(t1.get()); - ASSERT_OK(rs->updateRecord(t1.get(), loc1, "b", 2, false, NULL).getStatus()); + ASSERT_OK(rs->updateRecord(t1.get(), id1, "b", 2, false, NULL).getStatus()); w.commit(); } { WriteUnitOfWork w(t2.get()); - ASSERT_EQUALS(string("a"), rs->dataFor(t2.get(), loc1).data()); + ASSERT_EQUALS(string("a"), rs->dataFor(t2.get(), id1).data()); try { - // this should fail as our version of loc1 is too old - rs->updateRecord(t2.get(), loc1, "c", 2, false, NULL); + // this should fail as our version of id1 is too old + rs->updateRecord(t2.get(), id1, "c", 2, false, NULL); ASSERT(0); } catch (WriteConflictException& dle) { } @@ -529,7 +529,7 @@ StatusWith<RecordId> insertBSON(unique_ptr<OperationContext>& opCtx, WriteUnitOfWork wuow(opCtx.get()); WiredTigerRecordStore* wrs = checked_cast<WiredTigerRecordStore*>(rs.get()); invariant(wrs); - Status status = wrs->oplogDiskLocRegister(opCtx.get(), opTime); + Status status = wrs->oplogRecordIdRegister(opCtx.get(), opTime); if (!status.isOK()) return StatusWith<RecordId>(status); StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), obj.objdata(), obj.objsize(), false); @@ -648,7 +648,7 @@ TEST(WiredTigerRecordStoreTest, CappedOrder) { unique_ptr<WiredTigerHarnessHelper> harnessHelper(new WiredTigerHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("a.b", 100000, 10000)); - RecordId loc1; + RecordId id1; { // first insert a document unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); @@ -656,7 +656,7 @@ TEST(WiredTigerRecordStoreTest, CappedOrder) { WriteUnitOfWork uow(opCtx.get()); StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "a", 2, false); ASSERT_OK(res.getStatus()); - loc1 = res.getValue(); + id1 = res.getValue(); uow.commit(); } } @@ -664,8 +664,8 @@ TEST(WiredTigerRecordStoreTest, CappedOrder) { { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getCursor(opCtx.get()); - auto record = cursor->seekExact(loc1); - ASSERT_EQ(loc1, record->id); + auto record = cursor->seekExact(id1); + ASSERT_EQ(id1, record->id); ASSERT(!cursor->next()); } @@ -689,8 +689,8 @@ TEST(WiredTigerRecordStoreTest, CappedOrder) { { // state should be the same unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getCursor(opCtx.get()); - auto record = cursor->seekExact(loc1); - ASSERT_EQ(loc1, record->id); + auto record = cursor->seekExact(id1); + ASSERT_EQ(id1, record->id); ASSERT(!cursor->next()); } @@ -700,8 +700,8 @@ TEST(WiredTigerRecordStoreTest, CappedOrder) { { // now all 3 docs should be visible unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getCursor(opCtx.get()); - auto record = cursor->seekExact(loc1); - ASSERT_EQ(loc1, record->id); + auto record = cursor->seekExact(id1); + ASSERT_EQ(id1, record->id); ASSERT(cursor->next()); ASSERT(cursor->next()); ASSERT(!cursor->next()); @@ -747,7 +747,7 @@ TEST(WiredTigerRecordStoreTest, CappedCursorRollover) { RecordId _oplogOrderInsertOplog(OperationContext* txn, unique_ptr<RecordStore>& rs, int inc) { Timestamp opTime = Timestamp(5, inc); WiredTigerRecordStore* wrs = checked_cast<WiredTigerRecordStore*>(rs.get()); - Status status = wrs->oplogDiskLocRegister(txn, opTime); + 
Status status = wrs->oplogRecordIdRegister(txn, opTime); ASSERT_OK(status); BSONObj obj = BSON("ts" << opTime); StatusWith<RecordId> res = rs->insertRecord(txn, obj.objdata(), obj.objsize(), false); @@ -765,13 +765,13 @@ TEST(WiredTigerRecordStoreTest, OplogOrder) { ASSERT(wrs->usingOplogHack()); } - RecordId loc1; + RecordId id1; { // first insert a document unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); { WriteUnitOfWork uow(opCtx.get()); - loc1 = _oplogOrderInsertOplog(opCtx.get(), rs, 1); + id1 = _oplogOrderInsertOplog(opCtx.get(), rs, 1); uow.commit(); } } @@ -779,8 +779,8 @@ TEST(WiredTigerRecordStoreTest, OplogOrder) { { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getCursor(opCtx.get()); - auto record = cursor->seekExact(loc1); - ASSERT_EQ(loc1, record->id); + auto record = cursor->seekExact(id1); + ASSERT_EQ(id1, record->id); ASSERT(!cursor->next()); } @@ -804,8 +804,8 @@ TEST(WiredTigerRecordStoreTest, OplogOrder) { { // state should be the same unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getCursor(opCtx.get()); - auto record = cursor->seekExact(loc1); - ASSERT_EQ(loc1, record->id); + auto record = cursor->seekExact(id1); + ASSERT_EQ(id1, record->id); ASSERT(!cursor->next()); } @@ -815,8 +815,8 @@ TEST(WiredTigerRecordStoreTest, OplogOrder) { { // now all 3 docs should be visible unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getCursor(opCtx.get()); - auto record = cursor->seekExact(loc1); - ASSERT_EQ(loc1, record->id); + auto record = cursor->seekExact(id1); + ASSERT_EQ(id1, record->id); ASSERT(cursor->next()); ASSERT(cursor->next()); ASSERT(!cursor->next()); @@ -859,14 +859,14 @@ TEST(WiredTigerRecordStoreTest, CappedCursorYieldFirst) { unique_ptr<WiredTigerHarnessHelper> harnessHelper(new WiredTigerHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("a.b", 10000, 50)); - RecordId loc1; + RecordId id1; { // first insert a document unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); WriteUnitOfWork uow(opCtx.get()); StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "a", 2, false); ASSERT_OK(res.getStatus()); - loc1 = res.getValue(); + id1 = res.getValue(); uow.commit(); } @@ -879,7 +879,7 @@ TEST(WiredTigerRecordStoreTest, CappedCursorYieldFirst) { ASSERT_TRUE(cursor->restore()); auto record = cursor->next(); ASSERT(record); - ASSERT_EQ(loc1, record->id); + ASSERT_EQ(id1, record->id); ASSERT(!cursor->next()); } @@ -904,7 +904,7 @@ StatusWith<RecordId> insertBSONWithSize(OperationContext* opCtx, WriteUnitOfWork wuow(opCtx); WiredTigerRecordStore* wtrs = checked_cast<WiredTigerRecordStore*>(rs); invariant(wtrs); - Status status = wtrs->oplogDiskLocRegister(opCtx, opTime); + Status status = wtrs->oplogRecordIdRegister(opCtx, opTime); if (!status.isOK()) { return StatusWith<RecordId>(status); } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp index fd9fbfeb082..bcdd3c44a5a 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp @@ -187,8 +187,8 @@ void WiredTigerRecoveryUnit::abandonSnapshot() { _areWriteUnitOfWorksBanned = false; } -void WiredTigerRecoveryUnit::setOplogReadTill(const RecordId& loc) { - _oplogReadTill = loc; +void WiredTigerRecoveryUnit::setOplogReadTill(const RecordId& id) { 
+ _oplogReadTill = id; } namespace { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h index 97b1d21b0c9..1cce1195054 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h @@ -98,7 +98,7 @@ public: return _everStartedWrite; } - void setOplogReadTill(const RecordId& loc); + void setOplogReadTill(const RecordId& id); RecordId getOplogReadTill() const { return _oplogReadTill; } |
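The record-store half of the rename also touches the capped-collection visibility bookkeeping (_uncommittedRecordIds, _dealtWithCappedId, isCappedHidden, lowestCappedHiddenRecord). Below is a minimal sketch of that rule under a simplified model where RecordId is an int64_t and 0 stands for the null RecordId: a record is hidden to readers while any record with an id at or below it is still uncommitted. The class UncommittedIds and its method names are illustrative, not MongoDB API.

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Simplified model of the _uncommittedRecordIds bookkeeping in the diff:
// ids are appended in increasing order on insert and erased once the
// inserting transaction commits or rolls back.
class UncommittedIds {
public:
    void add(int64_t id) {  // cf. _addUncommitedRecordId_inlock
        _ids.push_back(id);
    }
    void dealtWith(int64_t id) {  // cf. _dealtWithCappedId (commit or rollback)
        auto it = std::find(_ids.begin(), _ids.end(), id);
        assert(it != _ids.end());
        _ids.erase(it);
    }
    // cf. isCappedHidden: hidden if the oldest uncommitted insert is at or before this id.
    bool isHidden(int64_t id) const {
        return !_ids.empty() && _ids.front() <= id;
    }
    // cf. lowestCappedHiddenRecord: 0 plays the role of the null RecordId.
    int64_t lowestHidden() const {
        return _ids.empty() ? 0 : _ids.front();
    }

private:
    std::vector<int64_t> _ids;  // kept in insertion (i.e. increasing id) order
};

int main() {
    UncommittedIds u;
    u.add(10);
    u.add(11);
    assert(u.isHidden(11));   // at/after the oldest uncommitted id (10)
    assert(!u.isHidden(9));   // committed before any pending insert
    u.dealtWith(10);          // first insert commits
    assert(u.lowestHidden() == 11);
    u.dealtWith(11);
    assert(!u.isHidden(11));  // nothing pending: everything is visible
}
```

This is the same gating that _oplogSetStartHack relies on in the diff: when anything is pending, readers are bounded by the oldest uncommitted id rather than the highest id seen, which keeps capped/oplog scans consistent with commit order.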