author     Eliot Horowitz <eliot@10gen.com>    2014-04-09 18:21:47 -0400
committer  Eliot Horowitz <eliot@10gen.com>    2014-04-10 14:47:36 -0400
commit     f3e81c695eef53e4fb9754ec4b12d55582e30f6a
tree       93a71dca363a78dd6448651e8eeac53b1523ea76
parent     9adb4e12d032c94c77cffe6669456d29b7925b6d
SERVER-13084: remove DiskLoc::drec
-rw-r--r--  src/mongo/db/commands/storage_details.cpp | 20
-rw-r--r--  src/mongo/db/commands/validate.cpp | 2
-rw-r--r--  src/mongo/db/diskloc.h | 1
-rw-r--r--  src/mongo/db/storage/extent_manager.h | 7
-rw-r--r--  src/mongo/db/storage/record.cpp | 5
-rw-r--r--  src/mongo/db/structure/record_store.h | 4
-rw-r--r--  src/mongo/db/structure/record_store_v1_base.cpp | 14
-rw-r--r--  src/mongo/db/structure/record_store_v1_base.h | 7
-rw-r--r--  src/mongo/db/structure/record_store_v1_capped.cpp | 60
-rw-r--r--  src/mongo/db/structure/record_store_v1_capped.h | 2
-rw-r--r--  src/mongo/db/structure/record_store_v1_simple.cpp | 13
-rw-r--r--  src/mongo/db/structure/record_store_v1_simple.h | 2
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp | 14
13 files changed, 90 insertions, 61 deletions
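
For context before the hunks below: the commit removes the free-floating DiskLoc::drec() accessor (which reached the extent manager through the thread-local client and cast the bytes) and moves the DiskLoc-to-DeletedRecord mapping onto the record store that owns the data. The following is a minimal, self-contained sketch of that pattern; the types and the byte-buffer backing are simplified stand-ins invented for illustration, and only the method names (deletedRecordFor, drec, recordFor) mirror the diff.

#include <cassert>
#include <vector>

// Hypothetical, simplified stand-ins for the mongod types touched by this
// commit; the field layouts here are invented purely for illustration.
struct DiskLoc { int a = -1; int ofs = 0; };
struct Record { int lengthWithHeaders; };
struct DeletedRecord { int lengthWithHeaders; int extentOfs; };

class RecordStoreSketch {
public:
    explicit RecordStoreSketch(std::vector<char>& dataFile) : _dataFile(dataFile) {}

    // Public, read-only accessor: what call sites such as validate.cpp and
    // storage_details.cpp use after this commit instead of loc.drec().
    const DeletedRecord* deletedRecordFor(const DiskLoc& loc) const {
        assert(loc.a != -1);
        return reinterpret_cast<const DeletedRecord*>(recordFor(loc));
    }

protected:
    // Protected, writable variant kept for the V1 record stores, which still
    // mutate deleted records while maintaining their free lists.
    DeletedRecord* drec(const DiskLoc& loc) const {
        assert(loc.a != -1);
        return reinterpret_cast<DeletedRecord*>(recordFor(loc));
    }

    Record* recordFor(const DiskLoc& loc) const {
        // The real code resolves loc through the ExtentManager; a flat byte
        // buffer stands in for the data file in this sketch.
        return reinterpret_cast<Record*>(&_dataFile[loc.ofs]);
    }

private:
    std::vector<char>& _dataFile;
};

The net effect visible in the hunks: callers resolve deleted records through the store that owns them, and addDeletedRec() shrinks from (DeletedRecord*, DiskLoc) to a single const DiskLoc&, deriving the DeletedRecord* internally via drec().
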
diff --git a/src/mongo/db/commands/storage_details.cpp b/src/mongo/db/commands/storage_details.cpp
index 374f353d010..60001f8e869 100644
--- a/src/mongo/db/commands/storage_details.cpp
+++ b/src/mongo/db/commands/storage_details.cpp
@@ -523,10 +523,10 @@ namespace {
*
* @return true on success, false on failure (partial output may still be present)
*/
- bool analyzeDiskStorage(const NamespaceDetails* nsd, const Extent* ex,
- const AnalyzeParams& params, string& errmsg,
- BSONObjBuilder& result) {
- bool isCapped = nsd->isCapped();
+ bool analyzeDiskStorage(const Collection* collection, const Extent* ex,
+ const AnalyzeParams& params, string& errmsg,
+ BSONObjBuilder& result) {
+ bool isCapped = collection->isCapped();
result.append("extentHeaderBytes", Extent::HeaderSize());
result.append("recordHeaderBytes", Record::HeaderSize);
@@ -568,9 +568,9 @@ namespace {
if (processingDeletedRecords) {
for (int bucketNum = 0; bucketNum < mongo::Buckets; bucketNum++) {
- DiskLoc dl = nsd->deletedListEntry(bucketNum);
+ DiskLoc dl = collection->details()->deletedListEntry(bucketNum);
while (!dl.isNull()) {
- DeletedRecord* dr = dl.drec();
+ const DeletedRecord* dr = collection->getRecordStore()->deletedRecordFor(dl);
processDeletedRecord(dl, dr, ex, params, bucketNum, sliceData,
deletedRecordsArrayBuilder.get());
dl = dr->nextDeleted();
@@ -677,7 +677,7 @@ namespace {
* @param params analysis parameters, will be updated with computed number of slices or
* granularity
*/
- bool analyzeExtent(const NamespaceDetails* nsd, const Extent* ex, SubCommand subCommand,
+ bool analyzeExtent(const Collection* collection, const Extent* ex, SubCommand subCommand,
AnalyzeParams& params, string& errmsg, BSONObjBuilder& outputBuilder) {
params.startOfs = max(0, params.startOfs);
@@ -698,7 +698,7 @@ namespace {
(params.granularity * (params.numberOfSlices - 1));
switch (subCommand) {
case SUBCMD_DISK_STORAGE:
- return analyzeDiskStorage(nsd, ex, params, errmsg, outputBuilder);
+ return analyzeDiskStorage(collection, ex, params, errmsg, outputBuilder);
case SUBCMD_PAGES_IN_RAM:
return analyzePagesInRAM(ex, params, errmsg, outputBuilder);
}
@@ -717,7 +717,7 @@ namespace {
// failure
bool success = false;
if (ex != NULL) {
- success = analyzeExtent(nsd, ex, subCommand, globalParams, errmsg, outputBuilder);
+ success = analyzeExtent(collection, ex, subCommand, globalParams, errmsg, outputBuilder);
}
else {
const DiskLoc dl = nsd->firstExtent();
@@ -743,7 +743,7 @@ namespace {
// total number of slices across all the
// extents
BSONObjBuilder extentBuilder(extentsArrayBuilder.subobjStart());
- success = analyzeExtent(nsd, curExtent, subCommand, extentParams, errmsg,
+ success = analyzeExtent(collection, curExtent, subCommand, extentParams, errmsg,
extentBuilder);
extentBuilder.doneFast();
}
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index d80e2eca635..6c1c52b629f 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -349,7 +349,7 @@ namespace mongo {
break;
}
- DeletedRecord *d = loc.drec();
+ const DeletedRecord* d = collection->getRecordStore()->deletedRecordFor(loc);
delSize += d->lengthWithHeaders();
loc = d->nextDeleted();
k++;
diff --git a/src/mongo/db/diskloc.h b/src/mongo/db/diskloc.h
index 9fa401c6f60..3989cf3a705 100644
--- a/src/mongo/db/diskloc.h
+++ b/src/mongo/db/diskloc.h
@@ -166,7 +166,6 @@ namespace mongo {
*/
BSONObj obj() const; // TODO(ERH): remove
Record* rec() const; // TODO(ERH): remove
- DeletedRecord* drec() const; // TODO(ERH): remove
Extent* ext() const; // TODO(ERH): remove
template< class V >
diff --git a/src/mongo/db/storage/extent_manager.h b/src/mongo/db/storage/extent_manager.h
index 480f5c122cd..fcbb2132966 100644
--- a/src/mongo/db/storage/extent_manager.h
+++ b/src/mongo/db/storage/extent_manager.h
@@ -87,7 +87,7 @@ namespace mongo {
// TODO: make private
DataFile* getFile( int n, int sizeNeeded = 0, bool preallocateOnly = false );
- // TODO: remove?
+ // TODO(ERH): remove?
void preallocateAFile() { getFile( numFiles() , 0, true ); }
void flushFiles( bool sync );
@@ -109,6 +109,7 @@ namespace mongo {
/**
* @param loc - has to be for a specific Record
+ * TODO(ERH): remove this - only RecordStore can do this
*/
Record* recordFor( const DiskLoc& loc ) const;
@@ -134,14 +135,18 @@ namespace mongo {
// these WILL cross Extent boundaries
// * @param loc - has to be the DiskLoc for a Record
+ // * TODO(ERH): remove this - only RecordStore can do this
DiskLoc getNextRecord( const DiskLoc& loc ) const;
+ // * TODO(ERH): remove this - only RecordStore can do this
DiskLoc getPrevRecord( const DiskLoc& loc ) const;
// does NOT traverse extent boundaries
+ // * TODO(ERH): remove this - only RecordStore can do this
DiskLoc getNextRecordInExtent( const DiskLoc& loc ) const;
+ // * TODO(ERH): remove this - only RecordStore can do this
DiskLoc getPrevRecordInExtent( const DiskLoc& loc ) const;
/**
diff --git a/src/mongo/db/storage/record.cpp b/src/mongo/db/storage/record.cpp
index 327637754b0..2db9815902d 100644
--- a/src/mongo/db/storage/record.cpp
+++ b/src/mongo/db/storage/record.cpp
@@ -529,11 +529,6 @@ namespace mongo {
return cc().database()->getExtentManager().recordFor( *this );
}
- DeletedRecord* DiskLoc::drec() const {
- verify( _a != -1 );
- return reinterpret_cast<DeletedRecord*>(rec());
- }
-
Extent* DiskLoc::ext() const {
verify( a() != -1 );
return cc().database()->getExtentManager().getExtent(*this);
diff --git a/src/mongo/db/structure/record_store.h b/src/mongo/db/structure/record_store.h
index 6eca53d8a5d..921b61f082a 100644
--- a/src/mongo/db/structure/record_store.h
+++ b/src/mongo/db/structure/record_store.h
@@ -68,6 +68,10 @@ namespace mongo {
// TODO: this makes me sad, it shouldn't be in the interface
// do not use this anymore
virtual void increaseStorageSize( int size, int quotaMax ) = 0;
+
+ // TODO: another sad one
+ virtual const DeletedRecord* deletedRecordFor( const DiskLoc& loc ) const = 0;
+
protected:
std::string _ns;
};
diff --git a/src/mongo/db/structure/record_store_v1_base.cpp b/src/mongo/db/structure/record_store_v1_base.cpp
index 8ae6eb5c146..b5684de05e5 100644
--- a/src/mongo/db/structure/record_store_v1_base.cpp
+++ b/src/mongo/db/structure/record_store_v1_base.cpp
@@ -57,6 +57,16 @@ namespace mongo {
return _extentManager->recordFor( loc );
}
+ const DeletedRecord* RecordStoreV1Base::deletedRecordFor( const DiskLoc& loc ) const {
+ invariant( loc.a() != -1 );
+ return reinterpret_cast<const DeletedRecord*>( recordFor( loc ) );
+ }
+
+ DeletedRecord* RecordStoreV1Base::drec( const DiskLoc& loc ) const {
+ invariant( loc.a() != -1 );
+ return reinterpret_cast<DeletedRecord*>( recordFor( loc ) );
+ }
+
StatusWith<DiskLoc> RecordStoreV1Base::insertRecord( const DocWriter* doc, int quotaMax ) {
int lenWHdr = doc->documentSize() + Record::HeaderSize;
if ( doc->addPadding() )
@@ -156,7 +166,7 @@ namespace mongo {
unsigned long long *p = reinterpret_cast<unsigned long long *>( todelete->data() );
*getDur().writing(p) = 0;
}
- addDeletedRec((DeletedRecord*)todelete, dl);
+ addDeletedRec(dl);
}
}
@@ -208,7 +218,7 @@ namespace mongo {
_details->setLastExtentSize( e->length );
- addDeletedRec(emptyLoc.drec(), emptyLoc);
+ addDeletedRec(emptyLoc);
}
}
diff --git a/src/mongo/db/structure/record_store_v1_base.h b/src/mongo/db/structure/record_store_v1_base.h
index 6158ed725b6..d1d7d02f1a3 100644
--- a/src/mongo/db/structure/record_store_v1_base.h
+++ b/src/mongo/db/structure/record_store_v1_base.h
@@ -59,11 +59,16 @@ namespace mongo {
void increaseStorageSize( int size, int quotaMax );
+ // TODO: another sad one
+ virtual const DeletedRecord* deletedRecordFor( const DiskLoc& loc ) const;
protected:
virtual StatusWith<DiskLoc> allocRecord( int lengthWithHeaders, int quotaMax ) = 0;
// TODO: document, remove, what have you
- virtual void addDeletedRec(DeletedRecord *d, DiskLoc dloc) = 0;
+ virtual void addDeletedRec( const DiskLoc& dloc) = 0;
+
+ // TODO: another sad one
+ virtual DeletedRecord* drec( const DiskLoc& loc ) const;
/** add a record to the end of the linked list chain within this extent.
require: you must have already declared write intent for the record header.
diff --git a/src/mongo/db/structure/record_store_v1_capped.cpp b/src/mongo/db/structure/record_store_v1_capped.cpp
index 6c5fcdeb0f3..19f74802ca6 100644
--- a/src/mongo/db/structure/record_store_v1_capped.cpp
+++ b/src/mongo/db/structure/record_store_v1_capped.cpp
@@ -184,7 +184,7 @@ namespace mongo {
// possibly slice up if we've allocated too much space
- DeletedRecord *r = loc.drec();
+ DeletedRecord *r = drec( loc );
/* note we want to grab from the front so our next pointers on disk tend
to go in a forward direction which is important for performance. */
@@ -197,13 +197,13 @@ namespace mongo {
getDur().writingInt(r->lengthWithHeaders()) = lenToAlloc;
DiskLoc newDelLoc = loc;
newDelLoc.inc(lenToAlloc);
- DeletedRecord* newDel = newDelLoc.drec();
+ DeletedRecord* newDel = drec( newDelLoc );
DeletedRecord* newDelW = getDur().writing(newDel);
newDelW->extentOfs() = r->extentOfs();
newDelW->lengthWithHeaders() = left;
newDelW->nextDeleted().Null();
- addDeletedRec(newDel, newDelLoc);
+ addDeletedRec(newDelLoc);
return StatusWith<DiskLoc>( loc );
}
@@ -243,7 +243,7 @@ namespace mongo {
DiskLoc empty = ext.ext()->reuse( _ns, true );
ext.ext()->xprev.writing() = prev;
ext.ext()->xnext.writing() = next;
- addDeletedRec( empty.drec(), empty );
+ addDeletedRec( empty );
}
return Status::OK();
@@ -265,7 +265,7 @@ namespace mongo {
// Pull out capExtent's DRs from deletedList
DiskLoc i = cappedFirstDeletedInCurExtent();
- for (; !i.isNull() && inCapExtent( i ); i = i.drec()->nextDeleted() ) {
+ for (; !i.isNull() && inCapExtent( i ); i = deletedRecordFor( i )->nextDeleted() ) {
DDD( "\t" << i );
drecs.push_back( i );
}
@@ -282,23 +282,25 @@ namespace mongo {
j++;
if ( j == drecs.end() ) {
DDD( "\t compact adddelrec" );
- addDeletedRec(a.drec(), a);
+ addDeletedRec( a);
break;
}
DiskLoc b = *j;
- while ( a.a() == b.a() && a.getOfs() + a.drec()->lengthWithHeaders() == b.getOfs() ) {
+ while ( a.a() == b.a() &&
+ a.getOfs() + drec( a )->lengthWithHeaders() == b.getOfs() ) {
+
// a & b are adjacent. merge.
- getDur().writingInt( a.drec()->lengthWithHeaders() ) += b.drec()->lengthWithHeaders();
+ getDur().writingInt( drec(a)->lengthWithHeaders() ) += drec(b)->lengthWithHeaders();
j++;
if ( j == drecs.end() ) {
DDD( "\t compact adddelrec2" );
- addDeletedRec(a.drec(), a);
+ addDeletedRec(a);
return;
}
b = *j;
}
DDD( "\t compact adddelrec3" );
- addDeletedRec(a.drec(), a);
+ addDeletedRec(a);
a = b;
}
@@ -308,7 +310,7 @@ namespace mongo {
if ( cappedLastDelRecLastExtent().isNull() )
return cappedListOfAllDeletedRecords();
else
- return cappedLastDelRecLastExtent().drec()->nextDeleted();
+ return drec(cappedLastDelRecLastExtent())->nextDeleted();
}
void CappedRecordStoreV1::cappedCheckMigrate() {
@@ -322,8 +324,8 @@ namespace mongo {
if ( first.isNull() )
continue;
DiskLoc last = first;
- for (; !last.drec()->nextDeleted().isNull(); last = last.drec()->nextDeleted() );
- last.drec()->nextDeleted().writing() = cappedListOfAllDeletedRecords();
+ for (; !drec(last)->nextDeleted().isNull(); last = drec(last)->nextDeleted() );
+ drec(last)->nextDeleted().writing() = cappedListOfAllDeletedRecords();
cappedListOfAllDeletedRecords().writing() = first;
_details->_deletedList[i].writing() = DiskLoc();
}
@@ -350,7 +352,7 @@ namespace mongo {
bool CappedRecordStoreV1::nextIsInCapExtent( const DiskLoc &dl ) const {
invariant( !dl.isNull() );
- DiskLoc next = dl.drec()->nextDeleted();
+ DiskLoc next = drec(dl)->nextDeleted();
if ( next.isNull() )
return false;
return inCapExtent( next );
@@ -363,7 +365,7 @@ namespace mongo {
getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();
else {
DiskLoc i = cappedFirstDeletedInCurExtent();
- for (; !i.isNull() && nextIsInCapExtent( i ); i = i.drec()->nextDeleted() );
+ for (; !i.isNull() && nextIsInCapExtent( i ); i = drec(i)->nextDeleted() );
getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = i;
}
@@ -381,10 +383,10 @@ namespace mongo {
DiskLoc prev = cappedLastDelRecLastExtent();
DiskLoc i = cappedFirstDeletedInCurExtent();
DiskLoc ret;
- for (; !i.isNull() && inCapExtent( i ); prev = i, i = i.drec()->nextDeleted() ) {
+ for (; !i.isNull() && inCapExtent( i ); prev = i, i = drec(i)->nextDeleted() ) {
// We need to keep at least one DR per extent in cappedListOfAllDeletedRecords(),
// so make sure there's space to create a DR at the end.
- if ( i.drec()->lengthWithHeaders() >= len + 24 ) {
+ if ( drec(i)->lengthWithHeaders() >= len + 24 ) {
ret = i;
break;
}
@@ -393,11 +395,11 @@ namespace mongo {
/* unlink ourself from the deleted list */
if ( !ret.isNull() ) {
if ( prev.isNull() )
- cappedListOfAllDeletedRecords().writing() = ret.drec()->nextDeleted();
+ cappedListOfAllDeletedRecords().writing() = drec(ret)->nextDeleted();
else
- prev.drec()->nextDeleted().writing() = ret.drec()->nextDeleted();
- ret.drec()->nextDeleted().writing().setInvalid(); // defensive.
- invariant( ret.drec()->extentOfs() < ret.getOfs() );
+ drec(prev)->nextDeleted().writing() = drec(ret)->nextDeleted();
+ drec(ret)->nextDeleted().writing().setInvalid(); // defensive.
+ invariant( drec(ret)->extentOfs() < ret.getOfs() );
}
return ret;
@@ -417,14 +419,14 @@ namespace mongo {
// cappedLastDelRecLastExtent() to that deleted record.
DiskLoc i = cappedListOfAllDeletedRecords();
for( ;
- !i.drec()->nextDeleted().isNull() &&
- !inCapExtent( i.drec()->nextDeleted() );
- i = i.drec()->nextDeleted() );
+ !drec(i)->nextDeleted().isNull() &&
+ !inCapExtent( drec(i)->nextDeleted() );
+ i = drec(i)->nextDeleted() );
// In our capped storage model, every extent must have at least one
// deleted record. Here we check that 'i' is not the last deleted
// record. (We expect that there will be deleted records in the new
// capExtent as well.)
- invariant( !i.drec()->nextDeleted().isNull() );
+ invariant( !drec(i)->nextDeleted().isNull() );
cappedLastDelRecLastExtent().writing() = i;
}
}
@@ -539,7 +541,9 @@ namespace mongo {
return _details->_capExtent.ext();
}
- void CappedRecordStoreV1::addDeletedRec(DeletedRecord *d, DiskLoc dloc) {
+ void CappedRecordStoreV1::addDeletedRec( const DiskLoc& dloc ) {
+ DeletedRecord* d = drec( dloc );
+
BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) <= sizeof(NamespaceDetails) );
{
@@ -556,9 +560,9 @@ namespace mongo {
getDur().writingDiskLoc( cappedListOfAllDeletedRecords() ) = dloc;
else {
DiskLoc i = cappedListOfAllDeletedRecords();
- for (; !i.drec()->nextDeleted().isNull(); i = i.drec()->nextDeleted() )
+ for (; !drec(i)->nextDeleted().isNull(); i = drec(i)->nextDeleted() )
;
- i.drec()->nextDeleted().writing() = dloc;
+ drec(i)->nextDeleted().writing() = dloc;
}
}
else {
diff --git a/src/mongo/db/structure/record_store_v1_capped.h b/src/mongo/db/structure/record_store_v1_capped.h
index 9dba1526d91..22750c522e3 100644
--- a/src/mongo/db/structure/record_store_v1_capped.h
+++ b/src/mongo/db/structure/record_store_v1_capped.h
@@ -63,7 +63,7 @@ namespace mongo {
protected:
virtual StatusWith<DiskLoc> allocRecord( int lengthWithHeaders, int quotaMax );
- virtual void addDeletedRec(DeletedRecord *d, DiskLoc dloc);
+ virtual void addDeletedRec(const DiskLoc& dloc);
private:
// -- start copy from cap.cpp --
diff --git a/src/mongo/db/structure/record_store_v1_simple.cpp b/src/mongo/db/structure/record_store_v1_simple.cpp
index cdb59d48398..1a7f1a95854 100644
--- a/src/mongo/db/structure/record_store_v1_simple.cpp
+++ b/src/mongo/db/structure/record_store_v1_simple.cpp
@@ -117,7 +117,7 @@ namespace mongo {
prev = &_details->_deletedList[b];
continue;
}
- DeletedRecord *r = cur.drec();
+ DeletedRecord *r = drec(cur);
if ( r->lengthWithHeaders() >= lenToAlloc &&
r->lengthWithHeaders() < bestmatchlen ) {
bestmatchlen = r->lengthWithHeaders();
@@ -143,7 +143,7 @@ namespace mongo {
}
// unlink ourself from the deleted list
- DeletedRecord *bmr = bestmatch.drec();
+ DeletedRecord *bmr = drec(bestmatch);
*getDur().writing(bestprev) = bmr->nextDeleted();
bmr->nextDeleted().writing().setInvalid(); // defensive.
invariant(bmr->extentOfs() < bestmatch.getOfs());
@@ -157,7 +157,7 @@ namespace mongo {
// determine if we should chop up
- DeletedRecord *r = loc.drec();
+ DeletedRecord *r = drec(loc);
/* note we want to grab from the front so our next pointers on disk tend
to go in a forward direction which is important for performance. */
@@ -189,13 +189,13 @@ namespace mongo {
getDur().writingInt(r->lengthWithHeaders()) = lenToAlloc;
DiskLoc newDelLoc = loc;
newDelLoc.inc(lenToAlloc);
- DeletedRecord* newDel = newDelLoc.drec();
+ DeletedRecord* newDel = drec(newDelLoc);
DeletedRecord* newDelW = getDur().writing(newDel);
newDelW->extentOfs() = r->extentOfs();
newDelW->lengthWithHeaders() = left;
newDelW->nextDeleted().Null();
- addDeletedRec(newDel, newDelLoc);
+ addDeletedRec( newDelLoc );
return loc;
@@ -242,7 +242,8 @@ namespace mongo {
"SimpleRecordStoreV1::truncate not implemented" );
}
- void SimpleRecordStoreV1::addDeletedRec(DeletedRecord *d, DiskLoc dloc) {
+ void SimpleRecordStoreV1::addDeletedRec( const DiskLoc& dloc ) {
+ DeletedRecord* d = drec( dloc );
BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) <= sizeof(NamespaceDetails) );
{
diff --git a/src/mongo/db/structure/record_store_v1_simple.h b/src/mongo/db/structure/record_store_v1_simple.h
index 95ff71342b9..b9be4f9ee44 100644
--- a/src/mongo/db/structure/record_store_v1_simple.h
+++ b/src/mongo/db/structure/record_store_v1_simple.h
@@ -49,7 +49,7 @@ namespace mongo {
protected:
virtual StatusWith<DiskLoc> allocRecord( int lengthWithHeaders, int quotaMax );
- virtual void addDeletedRec(DeletedRecord *d, DiskLoc dloc);
+ virtual void addDeletedRec(const DiskLoc& dloc);
private:
DiskLoc _allocFromExistingExtents( int lengthWithHeaders );
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index df1d4ab5a5f..7149e0c9478 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -1130,10 +1130,13 @@ namespace NamespaceTests {
}
ASSERT( !deleted.isNull() );
+ const RecordStore* rs = collection()->getRecordStore();
+
// Shrink the DeletedRecord's size to newDeletedRecordSize.
- ASSERT_GREATER_THAN_OR_EQUALS( deleted.drec()->lengthWithHeaders(),
+ ASSERT_GREATER_THAN_OR_EQUALS( rs->deletedRecordFor( deleted )->lengthWithHeaders(),
newDeletedRecordSize );
- getDur().writingInt( deleted.drec()->lengthWithHeaders() ) = newDeletedRecordSize;
+ DeletedRecord* dr = const_cast<DeletedRecord*>( rs->deletedRecordFor( deleted ) );
+ getDur().writingInt( dr->lengthWithHeaders() ) = newDeletedRecordSize;
// Re-insert the DeletedRecord into the deletedList bucket appropriate for its
// new size.
@@ -1581,15 +1584,18 @@ namespace NamespaceTests {
create();
cookDeletedList( 344 );
+ const RecordStore* rs = collection()->getRecordStore();
+
// The returned record is quantized from 300 to 320.
StatusWith<DiskLoc> actualLocation = collection()->insertDocument( docForRecordSize(300),
false );
ASSERT( actualLocation.isOK() );
- Record* rec = collection()->getRecordStore()->recordFor( actualLocation.getValue() );
+ Record* rec = rs->recordFor( actualLocation.getValue() );
ASSERT_EQUALS( 320, rec->lengthWithHeaders() );
// A new 24 byte deleted record is split off.
- ASSERT_EQUALS( 24, smallestDeletedRecord().drec()->lengthWithHeaders() );
+ ASSERT_EQUALS( 24,
+ rs->deletedRecordFor(smallestDeletedRecord())->lengthWithHeaders() );
}
virtual string spec() const { return "{ flags : 0 }"; }
};