From 08319d688517af8ac88fe7054ea10f1f08c5bf2d Mon Sep 17 00:00:00 2001
From: Mathias Stearn
Date: Wed, 17 Sep 2014 18:41:18 -0400
Subject: SERVER-15273 Only quantize to exact bucket sizes

Prep for new freelist implementation.
---
 .../storage/mmap_v1/catalog/namespace_details.cpp  |   2 +-
 .../db/storage/mmap_v1/catalog/namespace_details.h |   2 +-
 .../catalog/namespace_details_rsv1_metadata.cpp    |  14 -
 .../catalog/namespace_details_rsv1_metadata.h      |   3 -
 .../db/storage/mmap_v1/record_store_v1_base.cpp    | 107 +---
 .../db/storage/mmap_v1/record_store_v1_base.h      |  24 +-
 .../db/storage/mmap_v1/record_store_v1_capped.cpp  |   2 +-
 .../db/storage/mmap_v1/record_store_v1_simple.cpp  | 166 ++----
 .../mmap_v1/record_store_v1_simple_test.cpp        | 601 +++------------------
 .../storage/mmap_v1/record_store_v1_test_help.cpp  |   9 -
 .../db/storage/mmap_v1/record_store_v1_test_help.h |   4 -
 11 files changed, 132 insertions(+), 802 deletions(-)

diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
index 5a5364403c4..970d6e29c71 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
@@ -66,7 +66,7 @@ namespace mongo {
         nIndexes = 0;
         isCapped = capped;
         maxDocsInCapped = 0x7fffffff; // no limit (value is for pre-v2.3.2 compatibility)
-        paddingFactor = 1.0;
+        paddingFactorOldDoNotUse = 1.0;
         systemFlagsOldDoNotUse = 0;
         userFlags = 0;
         capFirstNewRecord = DiskLoc();
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
index 48ad001b90b..5afcd715128 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
@@ -92,7 +92,7 @@ namespace mongo {
 
         int maxDocsInCapped; // max # of objects for a capped table, -1 for inf.
 
-        double paddingFactor; // 1.0 = no padding.
+        double paddingFactorOldDoNotUse; // ofs 368 (16)
 
         int systemFlagsOldDoNotUse; // things that the system sets/cares about
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
index 54d74ae1d2e..b78ad7bef59 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
@@ -175,20 +175,6 @@ namespace mongo {
         return _details->maxDocsInCapped;
     }
 
-    double NamespaceDetailsRSV1MetaData::paddingFactor() const {
-        return _details->paddingFactor;
-    }
-
-    void NamespaceDetailsRSV1MetaData::setPaddingFactor( OperationContext* txn, double paddingFactor ) {
-        if ( paddingFactor == _details->paddingFactor )
-            return;
-
-        if ( _details->isCapped )
-            return;
-
-        *txn->recoveryUnit()->writing(&_details->paddingFactor) = paddingFactor;
-    }
-
     void NamespaceDetailsRSV1MetaData::_syncUserFlags( OperationContext* txn ) {
         if ( !_namespaceRecordStore )
             return;
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
index 9f933d003e5..609691ec5ed 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
@@ -96,9 +96,6 @@ namespace mongo {
 
         virtual long long maxCappedDocs() const;
 
-        virtual double paddingFactor() const;
-        virtual void setPaddingFactor( OperationContext* txn, double paddingFactor );
-
     private:
         void _syncUserFlags( OperationContext* txn );
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index 0f23a5e521e..a7ea59aea52 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -241,8 +241,8 @@ namespace mongo {
                                           "record has to be >= 4 bytes" );
         }
         int lenWHdr = docSize + Record::HeaderSize;
-        if ( doc->addPadding() )
-            lenWHdr = getRecordAllocationSize( lenWHdr );
+        if (doc->addPadding() && !isCapped())
+            lenWHdr = quantizeAllocationSpace( lenWHdr );
 
         StatusWith<DiskLoc> loc = allocRecord( txn, lenWHdr, enforceQuota );
         if ( !loc.isOK() )
@@ -258,8 +258,6 @@ namespace mongo {
 
         _details->incrementStats( txn, r->netLength(), 1 );
 
-        _paddingFits( txn );
-
         return loc;
     }
 
@@ -273,11 +271,7 @@ namespace mongo {
                                           "record has to be >= 4 bytes" );
         }
 
-        StatusWith<DiskLoc> status = _insertRecord( txn, data, len, enforceQuota );
-        if ( status.isOK() )
-            _paddingFits( txn );
-
-        return status;
+        return _insertRecord( txn, data, len, enforceQuota );
     }
 
     StatusWith<DiskLoc> RecordStoreV1Base::_insertRecord( OperationContext* txn,
@@ -285,7 +279,9 @@ namespace mongo {
                                                           const char* data,
                                                           int len,
                                                           bool enforceQuota ) {
 
-        int lenWHdr = getRecordAllocationSize( len + Record::HeaderSize );
+        int lenWHdr = len + Record::HeaderSize;
+        if (!isCapped())
+            lenWHdr = quantizeAllocationSpace( lenWHdr );
         fassert( 17208, lenWHdr >= ( len + Record::HeaderSize ) );
 
         StatusWith<DiskLoc> loc = allocRecord( txn, lenWHdr, enforceQuota );
@@ -315,7 +311,6 @@ namespace mongo {
         Record* oldRecord = recordFor( oldLocation );
         if ( oldRecord->netLength() >= dataSize ) {
             // we fit
-            _paddingFits( txn );
             memcpy( txn->recoveryUnit()->writingPtr( oldRecord->data(), dataSize ), data, dataSize );
             return StatusWith<DiskLoc>( oldLocation );
         }
@@ -327,8 +322,6 @@ namespace mongo {
 
         // we have to move
 
-        _paddingTooSmall( txn );
-
         StatusWith<DiskLoc> newLocation = _insertRecord( txn, data,
                                                          dataSize,
                                                          enforceQuota );
         if ( !newLocation.isOK() )
             return newLocation;
@@ -354,8 +347,6 @@ namespace mongo {
                                                      const RecordData& oldRec,
                                                      const char* damageSource,
                                                      const mutablebson::DamageVector& damages ) {
-        _paddingFits( txn );
-
         Record* rec = recordFor( loc );
         char* root = rec->data();
@@ -519,7 +510,6 @@ namespace mongo {
         output->appendNumber("datasize", _details->dataSize());
         output->appendNumber("nrecords", _details->numRecords());
         output->appendNumber("lastExtentSize", _details->lastExtentSize(txn));
-        output->appendNumber("padding", _details->paddingFactor());
 
         if ( _details->firstExtent(txn).isNull() )
             output->append( "firstExtent", "null" );
@@ -647,7 +637,6 @@ namespace mongo {
         int n = 0;
         int nInvalid = 0;
         long long nQuantizedSize = 0;
-        long long nPowerOf2QuantizedSize = 0;
         long long len = 0;
         long long nlen = 0;
         long long bsonLen = 0;
@@ -681,13 +670,6 @@ namespace mongo {
                     ++nQuantizedSize;
                 }
 
-                if ( r->lengthWithHeaders() ==
-                     quantizePowerOf2AllocationSpace( r->lengthWithHeaders() ) ) {
-                    // Count the number of records having a size consistent with the
-                    // quantizePowerOf2AllocationSpace quantization implementation.
-                    ++nPowerOf2QuantizedSize;
-                }
-
                 if (full){
                     size_t dataSize = 0;
                     const Status status = adaptor->validate( r->toRecordData(), &dataSize );
@@ -720,7 +702,6 @@ namespace mongo {
             }
 
             output->appendNumber("nQuantizedSize", nQuantizedSize);
-            output->appendNumber("nPowerOf2QuantizedSize", nPowerOf2QuantizedSize);
             output->appendNumber("bytesWithHeaders", len);
             output->appendNumber("bytesWithoutHeaders", nlen);
@@ -804,7 +785,9 @@ namespace mongo {
                                               BSONObjBuilder* result,
                                               double scale ) const {
         result->append( "lastExtentSize", _details->lastExtentSize(txn) / scale );
-        result->append( "paddingFactor", _details->paddingFactor() );
+        result->append( "paddingFactor", 1.0 ); // hard coded
+        result->append( "paddingFactorNote", "paddingFactor is unused and unmaintained in 2.8. It "
+                                             "remains hard coded to 1.0 for compatibility only." );
         result->append( "userFlags", _details->userFlags() );
 
         if ( isCapped() ) {
@@ -863,22 +846,6 @@ namespace mongo {
         return Status::OK();
     }
 
-    int RecordStoreV1Base::getRecordAllocationSize( int minRecordSize ) const {
-
-        if ( isCapped() )
-            return minRecordSize;
-
-        invariant( _details->paddingFactor() >= 1 );
-
-        if ( _details->isUserFlagSet( Flag_UsePowerOf2Sizes ) ) {
-            // quantize to the nearest bucketSize (or nearest 1mb boundary for large sizes).
-            return quantizePowerOf2AllocationSpace(minRecordSize);
-        }
-
-        // adjust for padding factor
-        return static_cast<int>(minRecordSize * _details->paddingFactor());
-    }
-
     DiskLoc RecordStoreV1Base::IntraExtentIterator::getNext() {
         if (_curr.isNull())
             return DiskLoc();
@@ -896,73 +863,31 @@ namespace mongo {
         }
     }
 
-    /* @return the size for an allocated record quantized to 1/16th of the BucketSize
-       @param allocSize      requested size to allocate
-    */
     int RecordStoreV1Base::quantizeAllocationSpace(int allocSize) {
-        const int bucketIdx = bucket(allocSize);
-        int bucketSize = bucketSizes[bucketIdx];
-        int quantizeUnit = bucketSize / 16;
-        if (allocSize >= (1 << 22)) // 4mb
-            // all allocatons >= 4mb result in 4mb/16 quantization units, even if >= 8mb.  idea is
-            // to reduce quantization overhead of large records at the cost of increasing the
-            // DeletedRecord size distribution in the largest bucket by factor of 4.
-            quantizeUnit = (1 << 18); // 256k
-        if (allocSize % quantizeUnit == 0)
-            // size is already quantized
-            return allocSize;
-        const int quantizedSpace = (allocSize | (quantizeUnit - 1)) + 1;
-        fassert(16484, quantizedSpace >= allocSize);
-        return quantizedSpace;
-    }
-
-    int RecordStoreV1Base::quantizePowerOf2AllocationSpace(int allocSize) {
-        for ( int i = 0; i < MaxBucket; i++ ) { // skips the largest (16MB) bucket
+        for ( int i = 0; i < Buckets; i++ ) {
             if ( bucketSizes[i] >= allocSize ) {
                 // Return the size of the first bucket sized >= the requested size.
                 return bucketSizes[i];
             }
         }
 
-        // if we get here, it means we're allocating more than 4mb, so round up
-        // to the nearest megabyte >= allocSize
-        const int MB = 1024*1024;
-        invariant(allocSize > 4*MB);
-        return (allocSize + (MB - 1)) & ~(MB - 1); // round up to MB alignment
+        // TODO make a specific bucket large enough to hold all documents rather than doing this.
+        invariant(allocSize < bucketSizes[MaxBucket] + 1024*1024);
+        return bucketSizes[MaxBucket] + 1024*1024;
     }
 
     int RecordStoreV1Base::bucket(int size) {
         for ( int i = 0; i < Buckets; i++ ) {
             if ( bucketSizes[i] > size ) {
-                // Return the first bucket sized _larger_ than the requested size.
+                // Return the first bucket sized _larger_ than the requested size. This is important
+                // since we want all records in a bucket to be >= the quantized size, therefore the
+                // quantized size must be the smallest allowed record per bucket.
                 return i;
             }
         }
         return MaxBucket;
     }
 
-    void RecordStoreV1Base::_paddingFits( OperationContext* txn ) {
-        MONGO_SOMETIMES(sometimes, 4) { // do this on a sampled basis to journal less
-            double x = max(1.0, _details->paddingFactor() - 0.001 );
-            _details->setPaddingFactor( txn, x );
-        }
-    }
-
-    void RecordStoreV1Base::_paddingTooSmall( OperationContext* txn ) {
-        MONGO_SOMETIMES(sometimes, 4) { // do this on a sampled basis to journal less
-            /* the more indexes we have, the higher the cost of a move.  so we take that into
-               account herein. note on a move that insert() calls paddingFits(), thus
-               here for example with no inserts and nIndexes = 1 we have
-               .001*4-.001 or a 3:1 ratio to non moves -> 75% nonmoves.  insert heavy
-               can pushes this down considerably. further tweaking will be a good idea but
-               this should be an adequate starting point.
-            */
-            double N = 4; // magic
-            double x = min(2.0,_details->paddingFactor() + (0.001 * N));
-            _details->setPaddingFactor( txn, x );
-        }
-    }
-
     Status RecordStoreV1Base::setCustomOption( OperationContext* txn,
                                                const BSONElement& option,
                                                BSONObjBuilder* info ) {
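For illustration, the new quantization rule is small enough to model in isolation. The sketch below is a minimal standalone version of the behavior, assuming the power-of-two bucketSizes table implied by the updated unit tests (32 bytes doubling up to a 16MB MaxBucket); the real table and constants live in record_store_v1_base.h/.cpp, so treat the names and values here as assumptions rather than the literal engine code:

    #include <cassert>

    // Assumed bucket table: 32, 64, 128, ... doubling up to 16MB. This exact
    // progression is inferred from the updated tests (33 -> 64, 10001 -> 16*1024,
    // 16MB + 1 -> 17MB), not copied from the engine source.
    static const int bucketSizes[] = {
        32,      64,      128,     256,     512,     1024,    2048,    4096,
        8192,    16384,   32768,   65536,   131072,  262144,  524288,  1048576,
        2097152, 4194304, 8388608, 16777216,
    };
    static const int Buckets = sizeof(bucketSizes) / sizeof(bucketSizes[0]);
    static const int MaxBucket = Buckets - 1;

    // Mirror of the new RecordStoreV1Base::quantizeAllocationSpace(): round every
    // allocation up to an exact bucket size, so a freed record always lands in a
    // bucket whose every member can satisfy any request that quantizes to it.
    int quantizeAllocationSpace(int minSize) {
        for (int i = 0; i < Buckets; i++) {
            if (bucketSizes[i] >= minSize)
                return bucketSizes[i];
        }
        // Oversized requests get the largest bucket plus 1MB of slack.
        assert(minSize < bucketSizes[MaxBucket] + 1024 * 1024);
        return bucketSizes[MaxBucket] + 1024 * 1024;
    }

    int main() {
        assert(quantizeAllocationSpace(33) == 64);
        assert(quantizeAllocationSpace(1000) == 1024);
        assert(quantizeAllocationSpace(16 * 1024 * 1024 + 1) == 17 * 1024 * 1024);
        return 0;
    }

Because allocations and deleted records now share the same exact size classes, the freelist search in record_store_v1_simple.cpp below no longer needs best-fit scanning.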
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
index 5675027a37f..8c1141b0ec4 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
@@ -91,10 +91,6 @@ namespace mongo {
 
         virtual long long maxCappedDocs() const = 0;
 
-        virtual double paddingFactor() const = 0;
-
-        virtual void setPaddingFactor( OperationContext* txn, double paddingFactor ) = 0;
-
     };
 
     class RecordStoreV1Base : public RecordStore {
@@ -177,13 +173,6 @@ namespace mongo {
 
         const RecordStoreV1MetaData* details() const { return _details.get(); }
 
-        /**
-         * @return the actual size to create
-         *         will be >= oldRecordSize
-         *         based on padding and any other flags
-         */
-        int getRecordAllocationSize( int minRecordSize ) const;
-
         DiskLoc getExtentLocForRecord( OperationContext* txn, const DiskLoc& loc ) const;
 
         DiskLoc getNextRecord( OperationContext* txn, const DiskLoc& loc ) const;
@@ -192,16 +181,10 @@ namespace mongo {
         DiskLoc getNextRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const;
         DiskLoc getPrevRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const;
 
-        /* @return the size for an allocated record quantized to 1/16th of the BucketSize.
-           @param allocSize    requested size to allocate
-           The returned size will be greater than or equal to 'allocSize'.
-        */
-        static int quantizeAllocationSpace(int allocSize);
-
         /**
-         * Quantize 'allocSize' to the nearest bucketSize (or nearest 1mb boundary for large sizes).
+         * Quantize 'minSize' to the nearest allocation size.
          */
-        static int quantizePowerOf2AllocationSpace(int allocSize);
+        static int quantizeAllocationSpace(int minSize);
 
         /* return which "deleted bucket" for this size object */
         static int bucket(int size);
@@ -249,9 +232,6 @@ namespace mongo {
         */
        void _addRecordToRecListInExtent(OperationContext* txn, Record* r, DiskLoc loc);
 
-       void _paddingTooSmall( OperationContext* txn );
-       void _paddingFits( OperationContext* txn );
-
        /**
         * internal
         * doesn't check inputs or change padding
         */
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
index 725ecd1a6af..8d6a0672a43 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
@@ -229,7 +229,7 @@ namespace mongo {
         // nIndexes preserve 0
         // capped preserve true
         // max preserve
-        _details->setPaddingFactor( txn, 1.0 );
+        // paddingFactor is unused
         _details->setCapFirstNewRecord( txn, DiskLoc().setInvalid() );
         setLastDelRecLastExtent( txn, DiskLoc().setInvalid() );
         // dataFileVersion preserve
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
index aca3f99df4a..4b8a51e9e87 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
@@ -54,6 +54,7 @@ namespace mongo {
     static Counter64 freelistBucketExhausted;
     static Counter64 freelistIterations;
 
+    // TODO figure out what to do about these.
     static ServerStatusMetricField<Counter64> dFreelist1( "storage.freelist.search.requests",
                                                           &freelistAllocs );
 
@@ -72,157 +73,59 @@ namespace mongo {
         invariant( !details->isCapped() );
         _normalCollection = NamespaceString::normal( ns );
-        if ( _details->paddingFactor() == 0 ) {
-            warning() << "implicit updgrade of paddingFactor of very old collection" << endl;
-            WriteUnitOfWork wunit(txn);
-            _details->setPaddingFactor(txn, 1.0);
-            wunit.commit();
-        }
-
     }
 
     SimpleRecordStoreV1::~SimpleRecordStoreV1() {
     }
 
     DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents( OperationContext* txn,
-                                                            int lenToAlloc ) {
+                                                            int lenToAllocRaw ) {
         // align size up to a multiple of 4
-        lenToAlloc = (lenToAlloc + (4-1)) & ~(4-1);
+        const int lenToAlloc = (lenToAllocRaw + (4-1)) & ~(4-1);
 
         freelistAllocs.increment();
         DiskLoc loc;
+        DeletedRecord* dr = NULL;
         {
-            DiskLoc *prev = 0;
-            DiskLoc *bestprev = 0;
-            DiskLoc bestmatch;
-            int bestmatchlen = INT_MAX; // sentinel meaning we haven't found a record big enough
-            int b = bucket(lenToAlloc);
-            DiskLoc cur = _details->deletedListEntry(b);
-
-            int extra = 5; // look for a better fit, a little.
-            int chain = 0;
-            while ( 1 ) {
-                { // defensive check
-                    int fileNumber = cur.a();
-                    int fileOffset = cur.getOfs();
-                    if (fileNumber < -1 || fileNumber >= 100000 || fileOffset < 0) {
-                        StringBuilder sb;
-                        sb << "Deleted record list corrupted in collection " << _ns
-                           << ", bucket " << b
-                           << ", link number " << chain
-                           << ", invalid link is " << cur.toString()
-                           << ", throwing Fatal Assertion";
-                        log() << sb.str() << endl;
-                        fassertFailed(16469);
-                    }
-                }
-                if ( cur.isNull() ) {
-                    // move to next bucket. if we were doing "extra", just break
-                    if ( bestmatchlen < INT_MAX )
-                        break;
-
-                    if ( chain > 0 ) {
-                        // if we looked at things in the right bucket, but they were not suitable
-                        freelistBucketExhausted.increment();
-                    }
-
-                    b++;
-                    if ( b > MaxBucket ) {
-                        // out of space. alloc a new extent.
-                        freelistIterations.increment( 1 + chain );
-                        return DiskLoc();
-                    }
-
-                    cur = _details->deletedListEntry(b);
-                    prev = 0;
-                    continue;
-                }
-
-                DeletedRecord *r = drec(cur);
-                if ( r->lengthWithHeaders() >= lenToAlloc &&
-                     r->lengthWithHeaders() < bestmatchlen ) {
-                    bestmatchlen = r->lengthWithHeaders();
-                    bestmatch = cur;
-                    bestprev = prev;
-                    if (r->lengthWithHeaders() == lenToAlloc)
-                        // exact match, stop searching
-                        break;
-                }
-
-                if ( bestmatchlen < INT_MAX && --extra <= 0 )
+            int myBucket;
+            for (myBucket = bucket(lenToAlloc); myBucket < Buckets; myBucket++) {
+                // Only look at the first entry in each bucket. This works because we are either
+                // quantizing or allocating fixed-size blocks.
+                const DiskLoc head = _details->deletedListEntry(myBucket);
+                if (head.isNull()) continue;
+                DeletedRecord* const candidate = drec(head);
+                if (candidate->lengthWithHeaders() >= lenToAlloc) {
+                    loc = head;
+                    dr = candidate;
                     break;
-
-                if ( ++chain > 30 && b <= MaxBucket ) {
-                    // too slow, force move to next bucket to grab a big chunk
-                    //b++;
-                    freelistIterations.increment( chain );
-                    chain = 0;
-                    cur.Null();
-                }
-                else {
-                    cur = r->nextDeleted();
-                    prev = &r->nextDeleted();
                 }
             }
 
-            // unlink ourself from the deleted list
-            DeletedRecord *bmr = drec(bestmatch);
-            if ( bestprev ) {
-                *txn->recoveryUnit()->writing(bestprev) = bmr->nextDeleted();
-            }
-            else {
-                // should be the front of a free-list
-                int myBucket = bucket(bmr->lengthWithHeaders());
-                invariant( _details->deletedListEntry(myBucket) == bestmatch );
-                _details->setDeletedListEntry(txn, myBucket, bmr->nextDeleted());
-            }
-            *txn->recoveryUnit()->writing(&bmr->nextDeleted()) = DiskLoc().setInvalid(); // defensive.
-            invariant(bmr->extentOfs() < bestmatch.getOfs());
+            if (!dr)
+                return DiskLoc(); // no space
 
-            freelistIterations.increment( 1 + chain );
-            loc = bestmatch;
+            // Unlink ourself from the deleted list
+            _details->setDeletedListEntry(txn, myBucket, dr->nextDeleted());
+            *txn->recoveryUnit()->writing(&dr->nextDeleted()) = DiskLoc().setInvalid(); // defensive
         }
 
-        if ( loc.isNull() )
-            return loc;
-
-        // determine if we should chop up
+        invariant( dr->extentOfs() < loc.getOfs() );
 
-        DeletedRecord *r = drec(loc);
+        // Split the deleted record if it has at least as much left over space as our smallest
+        // allocation size. Otherwise, just take the whole DeletedRecord.
+        const int remainingLength = dr->lengthWithHeaders() - lenToAlloc;
+        if (remainingLength >= bucketSizes[0]) {
+            txn->recoveryUnit()->writingInt(dr->lengthWithHeaders()) = lenToAlloc;
+            const DiskLoc newDelLoc = DiskLoc(loc.a(), loc.getOfs() + lenToAlloc);
+            DeletedRecord* newDel = txn->recoveryUnit()->writing(drec(newDelLoc));
+            newDel->extentOfs() = dr->extentOfs();
+            newDel->lengthWithHeaders() = remainingLength;
+            newDel->nextDeleted().Null();
 
-        /* note we want to grab from the front so our next pointers on disk tend
-           to go in a forward direction which is important for performance. */
-        int regionlen = r->lengthWithHeaders();
-        invariant( r->extentOfs() < loc.getOfs() );
-
-        int left = regionlen - lenToAlloc;
-        if ( left < 24 || left < (lenToAlloc / 8) ) {
-            // you get the whole thing.
-            return loc;
+            addDeletedRec(txn, newDelLoc);
         }
 
-        // don't quantize:
-        //   - $ collections (indexes) as we already have those aligned the way we want SERVER-8425
-        if ( _normalCollection ) {
-            // we quantize here so that it only impacts newly sized records
-            // this prevents oddities with older records and space re-use SERVER-8435
-            lenToAlloc = std::min( r->lengthWithHeaders(),
-                                   quantizeAllocationSpace( lenToAlloc ) );
-            left = regionlen - lenToAlloc;
-
-            if ( left < 24 ) {
-                // you get the whole thing.
-                return loc;
-            }
-        }
-
-        /* split off some for further use.
-         */
-        txn->recoveryUnit()->writingInt(r->lengthWithHeaders()) = lenToAlloc;
-        DiskLoc newDelLoc = loc;
-        newDelLoc.inc(lenToAlloc);
-        DeletedRecord* newDel = drec(newDelLoc);
-        DeletedRecord* newDelW = txn->recoveryUnit()->writing(newDel);
-        newDelW->extentOfs() = r->extentOfs();
-        newDelW->lengthWithHeaders() = left;
-        newDelW->nextDeleted().Null();
-
-        addDeletedRec( txn, newDelLoc );
 
         return loc;
     }
@@ -426,11 +329,8 @@ namespace mongo {
             const unsigned minAllocationSize = rawDataSize + Record::HeaderSize;
             unsigned allocationSize = minAllocationSize;
             switch( compactOptions->paddingMode ) {
-            case CompactOptions::NONE: // no padding, unless using powerOf2Sizes
-                if ( _details->isUserFlagSet(Flag_UsePowerOf2Sizes) )
-                    allocationSize = quantizePowerOf2AllocationSpace(minAllocationSize);
-                else
-                    allocationSize = minAllocationSize;
+            case CompactOptions::NONE: // default padding
+                allocationSize = quantizeAllocationSpace(minAllocationSize);
                 break;
 
             case CompactOptions::PRESERVE: // keep original padding
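The rewritten _allocFromExistingExtents is the core of the change: instead of chasing up to 31 links per bucket looking for a best fit, it examines only the head of each bucket's deleted list and lets quantization guarantee that the head is usable. The following condensed model shows the same control flow with plain in-memory blocks instead of DiskLocs and no journaling; the names and the abbreviated bucket table are illustrative assumptions, not the engine's code:

    #include <list>

    static const int bucketSizes[] = { 32, 64, 128, 256, 512, 1024 }; // abbreviated
    static const int Buckets = sizeof(bucketSizes) / sizeof(bucketSizes[0]);

    struct FreeBlock { int offset; int length; };
    static std::list<FreeBlock> freeLists[Buckets]; // one deleted list per bucket

    static int bucketFor(int size) {
        // First bucket sized strictly larger, matching RecordStoreV1Base::bucket().
        for (int i = 0; i < Buckets; i++)
            if (bucketSizes[i] > size)
                return i;
        return Buckets - 1;
    }

    // Returns the offset of the allocated block, or -1 when the caller must
    // allocate a new extent instead.
    int allocFromFreeLists(int lenToAlloc) {
        for (int b = bucketFor(lenToAlloc); b < Buckets; b++) {
            if (freeLists[b].empty())
                continue;                      // only the head of each bucket is examined
            FreeBlock head = freeLists[b].front();
            if (head.length < lenToAlloc)
                continue;                      // head too small; quantization makes this rare
            freeLists[b].pop_front();          // unlink from the deleted list
            const int remaining = head.length - lenToAlloc;
            if (remaining >= bucketSizes[0]) { // leftover can stand alone as a DeletedRecord
                FreeBlock tail = { head.offset + lenToAlloc, remaining };
                freeLists[bucketFor(tail.length)].push_front(tail);
            }
            return head.offset;                // otherwise hand out the whole block
        }
        return -1;
    }

For example, a 544-byte block at the head of its bucket satisfies a 512-byte request and returns a 32-byte tail to the smallest bucket, which is exactly the scenario the AllocUseQuantizedDeletedRecordWithSplit test below constructs.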
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
index 150611e1201..6b5dd87bf1d 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
@@ -40,57 +40,23 @@ using namespace mongo;
 
 namespace {
 
-    // Provides data to be inserted. Must be large enough for largest possible record.
-    // Should be in BSS so unused portions should be free.
-    char zeros[20*1024*1024] = {};
-
     TEST( SimpleRecordStoreV1, quantizeAllocationSpaceSimple ) {
-        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(33), 36);
-        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1000), 1024);
-        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(10001), 10240);
-        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(100000), 106496);
-        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1000001), 1048576);
-        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(10000000), 10223616);
+        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(33), 64);
+        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1000), 1024);
+        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(10001), 16*1024);
+        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(100000), 128*1024);
+        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1000001), 1024*1024);
+        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(10000000), 16*1024*1024);
+        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(16*1024*1024), 16*1024*1024);
+        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(16*1024*1024 + 1), 17*1024*1024);
     }
 
     TEST( SimpleRecordStoreV1, quantizeAllocationMinMaxBound ) {
         const int maxSize = 16 * 1024 * 1024;
-        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1), 2);
+        ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1), 32);
         ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(maxSize), maxSize);
     }
 
-    /**
-     * Test Quantize record allocation on every boundary, as well as boundary-1
-     * @see NamespaceDetails::quantizeAllocationSpace()
-     */
-    TEST( SimpleRecordStoreV1, quantizeAllocationBoundary ) {
-        for (int iBucket = 0; iBucket <= RecordStoreV1Base::MaxBucket; ++iBucket) {
-            // for each bucket in range [min, max)
-            const int bucketSize = RecordStoreV1Base::bucketSizes[iBucket];
-            const int prevBucketSize =
-                (iBucket - 1 >= 0) ? RecordStoreV1Base::bucketSizes[iBucket - 1] : 0;
-            const int intervalSize = bucketSize / 16;
-            for (int iBoundary = prevBucketSize;
-                 iBoundary < bucketSize;
-                 iBoundary += intervalSize) {
-                // for each quantization boundary within the bucket
-                for (int iSize = iBoundary - 1; iSize <= iBoundary; ++iSize) {
-                    // test the quantization boundary - 1, and the boundary itself
-                    const int quantized = RecordStoreV1Base::quantizeAllocationSpace(iSize);
-                    // assert quantized size is greater than or equal to requested size
-                    ASSERT(quantized >= iSize);
-                    // assert quantized size is within one quantization interval of
-                    // the requested size
-                    ASSERT(quantized - iSize <= intervalSize);
-                    // assert quantization is an idempotent operation
-                    ASSERT(quantized == RecordStoreV1Base::quantizeAllocationSpace(quantized));
-                }
-            }
-        }
-    }
-
     /**
      * For buckets up to 4MB powerOf2 allocation should round up to next power of 2. It should be
      * return the input unmodified if it is already a power of 2.
@@ -103,45 +69,20 @@ namespace {
 
             // size - 1 is quantized to size.
             ASSERT_EQUALS( size,
-                           RecordStoreV1Base::quantizePowerOf2AllocationSpace( size - 1 ) );
+                           RecordStoreV1Base::quantizeAllocationSpace( size - 1 ) );
 
             // size is quantized to size.
             ASSERT_EQUALS( size,
-                           RecordStoreV1Base::quantizePowerOf2AllocationSpace( size ) );
+                           RecordStoreV1Base::quantizeAllocationSpace( size ) );
 
             // size + 1 is quantized to nextSize (unless > 4MB which is covered by next test)
             if (size < 4*1024*1024) {
                 ASSERT_EQUALS( nextSize,
-                               RecordStoreV1Base::quantizePowerOf2AllocationSpace( size + 1 ) );
+                               RecordStoreV1Base::quantizeAllocationSpace( size + 1 ) );
             }
         }
     }
 
-    /**
-     * Within the largest bucket, quantizePowerOf2AllocationSpace quantizes to the nearest
-     * megabyte boundary.
-     */
-    TEST( SimpleRecordStoreV1, SimpleRecordLargePowerOf2ToMegabyteBoundary ) {
-        // Iterate iSize over all 1mb boundaries from the size of the next to largest bucket
-        // to the size of the largest bucket + 1mb.
-        for( int iSize = RecordStoreV1Base::bucketSizes[ RecordStoreV1Base::MaxBucket - 1 ];
-             iSize <= RecordStoreV1Base::bucketSizes[ RecordStoreV1Base::MaxBucket ] + 0x100000;
-             iSize += 0x100000 ) {
-
-            // iSize - 1 is quantized to iSize.
-            ASSERT_EQUALS( iSize,
-                           RecordStoreV1Base::quantizePowerOf2AllocationSpace( iSize - 1 ) );
-
-            // iSize is quantized to iSize.
-            ASSERT_EQUALS( iSize,
-                           RecordStoreV1Base::quantizePowerOf2AllocationSpace( iSize ) );
-
-            // iSize + 1 is quantized to iSize + 1mb.
-            ASSERT_EQUALS( iSize + 0x100000,
-                           RecordStoreV1Base::quantizePowerOf2AllocationSpace( iSize + 1 ) );
-        }
-    }
-
     BSONObj docForRecordSize( int size ) {
         BSONObjBuilder b;
         b.append( "_id", 5 );
         b.append( "x", string( size - Record::HeaderSize - 22, 'x' ) );
         BSONObj x = b.obj();
         ASSERT_EQUALS( Record::HeaderSize + x.objsize(), size );
         return x;
     }
 
+    class BsonDocWriter : public DocWriter {
+    public:
+        BsonDocWriter(const BSONObj& obj, bool padding) : _obj(obj), _padding(padding) {}
+
+        virtual void writeDocument(char* buf) const { memcpy(buf, _obj.objdata(), _obj.objsize()); }
+        virtual size_t documentSize() const { return _obj.objsize(); }
+        virtual bool addPadding() const { return _padding; }
+
+    private:
+        BSONObj _obj;
+        bool _padding;
+    };
+
     /** alloc() quantizes the requested size using quantizeAllocationSpace() rules. */
     TEST(SimpleRecordStoreV1, AllocQuantized) {
         OperationContextNoop txn;
         DummyExtentManager em;
         DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
         string myns = "test.AllocQuantized";
         SimpleRecordStoreV1 rs( &txn, myns, md, &em, false );
 
         BSONObj obj = docForRecordSize( 300 );
         StatusWith<DiskLoc> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
         ASSERT( result.isOK() );
 
         // The length of the allocated record is quantized.
-        ASSERT_EQUALS( 320, rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
+        ASSERT_EQUALS( 512 , rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
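+        // (300 bytes of record plus header rounds up to 512, the smallest
+        // bucket size that can hold it.)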
     }
 
-    /**
-     * alloc() does not quantize records in index collections using quantizeAllocationSpace()
-     * rules.
-     */
-    TEST(SimpleRecordStoreV1, AllocIndexNamespaceNotQuantized) {
+    /** alloc() quantizes the requested size if DocWriter::addPadding() returns true. */
+    TEST(SimpleRecordStoreV1, AllocQuantizedWithDocWriter) {
         OperationContextNoop txn;
         DummyExtentManager em;
         DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
 
-        string myns = "test.AllocIndexNamespaceNotQuantized";
-        SimpleRecordStoreV1 rs( &txn, myns + "$x", md, &em, false );
+        string myns = "test.AllocQuantized";
+        SimpleRecordStoreV1 rs( &txn, myns, md, &em, false );
 
-        BSONObj obj = docForRecordSize( 300 );
-        StatusWith<DiskLoc> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+        BsonDocWriter docWriter(docForRecordSize( 300 ), true);
+        StatusWith<DiskLoc> result = rs.insertRecord(&txn, &docWriter, false);
         ASSERT( result.isOK() );
 
-        // The length of the allocated record is not quantized.
-        ASSERT_EQUALS( 300, rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
-
+        // The length of the allocated record is quantized.
+        ASSERT_EQUALS( 512 , rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
     }
 
-    /** alloc() quantizes records in index collections to the nearest multiple of 4. */
-    TEST(SimpleRecordStoreV1, AllocIndexNamespaceSlightlyQuantized) {
+    /**
+     * alloc() does not quantize records if DocWriter::addPadding() returns false
+     */
+    TEST(SimpleRecordStoreV1, AllocNonQuantizedDocWriter) {
         OperationContextNoop txn;
         DummyExtentManager em;
         DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
 
         string myns = "test.AllocIndexNamespaceNotQuantized";
         SimpleRecordStoreV1 rs( &txn, myns + "$x", md, &em, false );
 
-        BSONObj obj = docForRecordSize( 298 );
-        StatusWith<DiskLoc> result = rs.insertRecord( &txn, obj.objdata(), obj.objsize(), false);
+        BsonDocWriter docWriter(docForRecordSize( 300 ), false);
+        StatusWith<DiskLoc> result = rs.insertRecord(&txn, &docWriter, false);
         ASSERT( result.isOK() );
 
+        // The length of the allocated record is not quantized.
         ASSERT_EQUALS( 300, rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
     }
 
-    /** alloc() returns a non quantized record larger than the requested size. */
-    TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecord) {
+    /** alloc() aligns record sizes up to 4 bytes even if DocWriter::addPadding returns false.
+     */
+    TEST(SimpleRecordStoreV1, AllocAlignedDocWriter) {
         OperationContextNoop txn;
         DummyExtentManager em;
         DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
-        SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
 
-        {
-            LocAndSize drecs[] = {
-                {DiskLoc(0, 1000), 310},
-                {}
-            };
-            initializeV1RS(&txn, NULL, drecs, &em, md);
-        }
+        string myns = "test.AllocIndexNamespaceNotQuantized";
+        SimpleRecordStoreV1 rs( &txn, myns + "$x", md, &em, false );
 
-        BSONObj obj = docForRecordSize( 300 );
-        StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
-        ASSERT_OK( actualLocation.getStatus() );
+        BsonDocWriter docWriter(docForRecordSize( 298 ), false);
+        StatusWith<DiskLoc> result = rs.insertRecord(&txn, &docWriter, false);
+        ASSERT( result.isOK() );
 
-        {
-            LocAndSize recs[] = {
-                {DiskLoc(0, 1000), 310},
-                {}
-            };
-            LocAndSize drecs[] = {
-                {}
-            };
-            assertStateV1RS(&txn, recs, drecs, &em, md);
-        }
+        ASSERT_EQUALS( 300, rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
     }
 
-    /** alloc() returns a non quantized record equal to the requested size. */
-    TEST(SimpleRecordStoreV1, AllocExactSizeNonQuantizedDeletedRecord) {
+    /**
+     * alloc() with quantized size doesn't split if enough room left over.
+     */
+    TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithoutSplit) {
         OperationContextNoop txn;
         DummyExtentManager em;
         DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
         SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
 
         {
             LocAndSize drecs[] = {
-                {DiskLoc(0, 1000), 300},
+                {DiskLoc(0, 1000), 512 + 31},
                 {}
             };
             initializeV1RS(&txn, NULL, drecs, &em, md);
         }
 
-        BSONObj obj = docForRecordSize( 300 );
-        StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+        BsonDocWriter docWriter(docForRecordSize( 300 ), true);
+        StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, &docWriter, false);
         ASSERT_OK( actualLocation.getStatus() );
 
         {
             LocAndSize recs[] = {
-                {DiskLoc(0, 1000), 300},
+                {DiskLoc(0, 1000), 512 + 31},
                 {}
             };
             LocAndSize drecs[] = {
                 {}
             };
             assertStateV1RS(&txn, recs, drecs, &em, md);
         }
     }
 
     /**
-     * alloc() returns a non quantized record equal to the quantized size plus some extra space
-     * too small to make a DeletedRecord.
+     * alloc() with quantized size splits if enough room left over.
      */
-    TEST(SimpleRecordStoreV1, AllocQuantizedWithExtra) {
+    TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithSplit) {
         OperationContextNoop txn;
         DummyExtentManager em;
         DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
         SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
 
         {
             LocAndSize drecs[] = {
-                {DiskLoc(0, 1000), 343},
+                {DiskLoc(0, 1000), 512 + 32},
                 {}
             };
             initializeV1RS(&txn, NULL, drecs, &em, md);
         }
 
-        BSONObj obj = docForRecordSize( 300 );
-        StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+        BsonDocWriter docWriter(docForRecordSize( 300 ), true);
+        StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, &docWriter, false);
         ASSERT_OK( actualLocation.getStatus() );
 
         {
             LocAndSize recs[] = {
-                {DiskLoc(0, 1000), 343},
+                {DiskLoc(0, 1000), 512},
                 {}
             };
             LocAndSize drecs[] = {
+                {DiskLoc(0, 1512), 32},
                 {}
             };
             assertStateV1RS(&txn, recs, drecs, &em, md);
         }
     }
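+
+    // In the two tests above, the leftover after carving 512 bytes out of the
+    // deleted record is 31 vs. 32 bytes. 32 is bucketSizes[0], the smallest
+    // size that can stand alone as a DeletedRecord, so only the second case
+    // splits off a new drec.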
 
     /**
-     * alloc() returns a quantized record when the extra space in the reclaimed deleted record
-     * is large enough to form a new deleted record.
+     * alloc() with non quantized size doesn't split if enough room left over.
      */
-    TEST(SimpleRecordStoreV1, AllocQuantizedWithoutExtra) {
+    TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithoutSplit) {
         OperationContextNoop txn;
         DummyExtentManager em;
         DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
         SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
 
         {
             LocAndSize drecs[] = {
-                {DiskLoc(0, 1000), 344},
+                {DiskLoc(0, 1000), 331},
                 {}
             };
             initializeV1RS(&txn, NULL, drecs, &em, md);
         }
 
-        BSONObj obj = docForRecordSize( 300 );
-        StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+        BsonDocWriter docWriter(docForRecordSize( 300 ), false);
+        StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, &docWriter, false);
         ASSERT_OK( actualLocation.getStatus() );
 
         {
             LocAndSize recs[] = {
-                // The returned record is quantized from 300 to 320.
-                {DiskLoc(0, 1000), 320},
+                {DiskLoc(0, 1000), 331},
                 {}
             };
             LocAndSize drecs[] = {
-                // A new 24 byte deleted record is split off.
-                {DiskLoc(0, 1320), 24},
                 {}
             };
             assertStateV1RS(&txn, recs, drecs, &em, md);
         }
     }
 
     /**
-     * A non quantized deleted record within 1/8 of the requested size is returned as is, even
-     * if a quantized portion of the deleted record could be used instead.
+     * alloc() with non quantized size splits if enough room left over.
      */
-    TEST(SimpleRecordStoreV1, AllocNotQuantizedNearDeletedSize) {
+    TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithSplit) {
         OperationContextNoop txn;
         DummyExtentManager em;
         DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
         SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
 
         {
             LocAndSize drecs[] = {
-                {DiskLoc(0, 1000), 344},
+                {DiskLoc(0, 1000), 332},
                 {}
             };
             initializeV1RS(&txn, NULL, drecs, &em, md);
         }
 
-        BSONObj obj = docForRecordSize( 319 );
-        StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+        BsonDocWriter docWriter(docForRecordSize( 300 ), false);
+        StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, &docWriter, false);
         ASSERT_OK( actualLocation.getStatus() );
 
-        // Even though 319 would be quantized to 320 and 344 - 320 == 24 could become a new
-        // deleted record, the entire deleted record is returned because
-        // ( 344 - 320 ) < ( 320 / 8 ).
-
         {
             LocAndSize recs[] = {
-                {DiskLoc(0, 1000), 344},
+                {DiskLoc(0, 1000), 300},
                 {}
             };
             LocAndSize drecs[] = {
+                {DiskLoc(0, 1300), 32},
                 {}
             };
             assertStateV1RS(&txn, recs, drecs, &em, md);
         }
     }
 
-    /** getRecordAllocationSize() returns its argument when the padding factor is 1.0. */
-    TEST(SimpleRecordStoreV1, GetRecordAllocationSizeNoPadding) {
-        OperationContextNoop txn;
-        DummyExtentManager em;
-        DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
-        SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-        ASSERT_EQUALS( 1.0, md->paddingFactor() );
-        ASSERT_EQUALS( 300, rs.getRecordAllocationSize( 300 ) );
-    }
-
-    /** getRecordAllocationSize() multiplies by a padding factor > 1.0. */
-    TEST(SimpleRecordStoreV1, GetRecordAllocationSizeWithPadding) {
-        OperationContextNoop txn;
-        DummyExtentManager em;
-        DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
-        SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-        double paddingFactor = 1.2;
-        md->setPaddingFactor( &txn, paddingFactor );
-        ASSERT_EQUALS( paddingFactor, md->paddingFactor() );
-        ASSERT_EQUALS( int(300 * paddingFactor), rs.getRecordAllocationSize( 300 ) );
-    }
-
-    /**
-     * getRecordAllocationSize() quantizes to the nearest power of 2 when Flag_UsePowerOf2Sizes
-     * is set.
- */ - TEST(SimpleRecordStoreV1, GetRecordAllocationSizePowerOf2) { - OperationContextNoop txn; - DummyExtentManager em; - DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( - false, - RecordStoreV1Base::Flag_UsePowerOf2Sizes ); - - SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false ); - ASSERT_EQUALS( 512, rs.getRecordAllocationSize( 300 ) ); - } - - /** - * getRecordAllocationSize() quantizes to the nearest power of 2 when Flag_UsePowerOf2Sizes - * is set, ignoring the padding factor. - */ - TEST(SimpleRecordStoreV1, GetRecordAllocationSizePowerOf2PaddingIgnored) { - OperationContextNoop txn; - DummyExtentManager em; - DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( - false, - RecordStoreV1Base::Flag_UsePowerOf2Sizes ); - - SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false ); - md->setPaddingFactor( &txn, 2.0 ); - ASSERT_EQUALS( 2.0, md->paddingFactor() ); - ASSERT_EQUALS( 512, rs.getRecordAllocationSize( 300 ) ); - } - - // ----------------- TEST( SimpleRecordStoreV1, FullSimple1 ) { @@ -453,327 +329,6 @@ namespace { ASSERT_EQUALS( string("abc"), string(recordData.data()) ); } - // ---------------- - - /** - * Inserts take the first deleted record with the correct size. - */ - TEST( SimpleRecordStoreV1, InsertTakesFirstDeletedWithExactSize ) { - OperationContextNoop txn; - DummyExtentManager em; - DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 ); - SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false ); - - { - LocAndSize recs[] = { - {DiskLoc(0, 1000), 100}, - {DiskLoc(0, 1100), 100}, - {DiskLoc(0, 1300), 100}, - {DiskLoc(2, 1100), 100}, - {} - }; - LocAndSize drecs[] = { - {DiskLoc(0, 1200), 100}, // this one will be used - {DiskLoc(2, 1000), 100}, - {DiskLoc(1, 1000), 1000}, - {} - }; - - initializeV1RS(&txn, recs, drecs, &em, md); - } - - rs.insertRecord(&txn, zeros, 100 - Record::HeaderSize, false); - - { - LocAndSize recs[] = { - {DiskLoc(0, 1000), 100}, - {DiskLoc(0, 1100), 100}, - {DiskLoc(0, 1300), 100}, - {DiskLoc(0, 1200), 100}, // this is the new record - {DiskLoc(2, 1100), 100}, - {} - }; - LocAndSize drecs[] = { - {DiskLoc(2, 1000), 100}, - {DiskLoc(1, 1000), 1000}, - {} - }; - assertStateV1RS(&txn, recs, drecs, &em, md); - } - } - - /** - * Test that we keep looking for better matches for 5 links once we find a non-exact match. - * This "extra" scanning does not proceed into bigger buckets. - * WARNING: this test depends on magic numbers inside RSV1Simple::_allocFromExistingExtents. - */ - TEST( SimpleRecordStoreV1, InsertLooksForBetterMatchUpTo5Links ) { - OperationContextNoop txn; - DummyExtentManager em; - DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 ); - SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false ); - - { - LocAndSize recs[] = { - {} - }; - LocAndSize drecs[] = { - // This intentionally leaves gaps to keep locs readable. - {DiskLoc(0, 1000), 75}, // too small - {DiskLoc(0, 1100), 100}, // 1st big enough: will be first record - {DiskLoc(0, 1200), 100}, // 2nd: will be third record - {DiskLoc(0, 1300), 100}, // 3rd - {DiskLoc(0, 1400), 100}, // 4th - {DiskLoc(0, 1500), 100}, // 5th: first and third will stop once they look here - {DiskLoc(0, 1600), 80}, // 6th: second will make it here and use this - {DiskLoc(0, 1700), 999}, // bigger bucket. 
-                {}
-            };
-            initializeV1RS(&txn, recs, drecs, &em, md);
-        }
-
-        rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false);
-        rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false);
-        rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false);
-
-        {
-            LocAndSize recs[] = {
-                {DiskLoc(0, 1100), 100}, // 1st insert
-                {DiskLoc(0, 1600),  80}, // 2nd insert
-                {DiskLoc(0, 1200), 100}, // 3rd insert
-                {}
-            };
-            LocAndSize drecs[] = {
-                {DiskLoc(0, 1000),  75},
-                {DiskLoc(0, 1300), 100},
-                {DiskLoc(0, 1400), 100},
-                {DiskLoc(0, 1500), 100},
-                {DiskLoc(0, 1700), 999},
-                {}
-            };
-            assertStateV1RS(&txn, recs, drecs, &em, md);
-        }
-    }
-
-    /**
-     * Test that we stop looking in a bucket once we see 31 too small drecs.
-     * WARNING: this test depends on magic numbers inside RSV1Simple::_allocFromExistingExtents.
-     */
-    TEST( SimpleRecordStoreV1, InsertLooksForMatchUpTo31Links ) {
-        OperationContextNoop txn;
-        DummyExtentManager em;
-        DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
-        SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
-        {
-            LocAndSize recs[] = {
-                {}
-            };
-            LocAndSize drecs[] = {
-                // This intentionally leaves gaps to keep locs readable.
-                {DiskLoc(0, 1000), 50}, // different bucket
-
-                {DiskLoc(0, 1100), 75}, // 1st too small in correct bucket
-                {DiskLoc(0, 1200), 75},
-                {DiskLoc(0, 1300), 75},
-                {DiskLoc(0, 1400), 75},
-                {DiskLoc(0, 1500), 75},
-                {DiskLoc(0, 1600), 75},
-                {DiskLoc(0, 1700), 75},
-                {DiskLoc(0, 1800), 75},
-                {DiskLoc(0, 1900), 75},
-                {DiskLoc(0, 2000), 75}, // 10th too small
-                {DiskLoc(0, 2100), 75},
-                {DiskLoc(0, 2200), 75},
-                {DiskLoc(0, 2300), 75},
-                {DiskLoc(0, 2400), 75},
-                {DiskLoc(0, 2500), 75},
-                {DiskLoc(0, 2600), 75},
-                {DiskLoc(0, 2700), 75},
-                {DiskLoc(0, 2800), 75},
-                {DiskLoc(0, 2900), 75},
-                {DiskLoc(0, 3000), 75}, // 20th too small
-                {DiskLoc(0, 3100), 75},
-                {DiskLoc(0, 3200), 75},
-                {DiskLoc(0, 3300), 75},
-                {DiskLoc(0, 3400), 75},
-                {DiskLoc(0, 3500), 75},
-                {DiskLoc(0, 3600), 75},
-                {DiskLoc(0, 3700), 75},
-                {DiskLoc(0, 3800), 75},
-                {DiskLoc(0, 3900), 75},
-                {DiskLoc(0, 4000), 75}, // 30th too small
-                {DiskLoc(0, 4100), 75}, // 31st too small
-
-                {DiskLoc(0, 8000), 80}, // big enough but wont be seen until we take an earlier one
-                {DiskLoc(0, 9000), 140}, // bigger bucket. jumps here after seeing 31 drecs
-                {}
-            };
-            initializeV1RS(&txn, recs, drecs, &em, md);
-        }
-
-        rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false); // takes from bigger bucket
-        rs.insertRecord(&txn, zeros, 70 - Record::HeaderSize, false); // removes a 75-sized drec
-        rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false); // now sees big-enough drec
-
-        {
-            LocAndSize recs[] = {
-                {DiskLoc(0, 9000),  80}, // 1st insert went here
-                {DiskLoc(0, 1100),  75}, // 2nd here
-                {DiskLoc(0, 8000),  80}, // 3rd here
-                {}
-            };
-            LocAndSize drecs[] = {
-                {DiskLoc(0, 9000 + 80), 140 - 80}, // split off during first insert
-                {DiskLoc(0, 1000), 50},
-                {DiskLoc(0, 1200), 75},
-                {DiskLoc(0, 1300), 75},
-                {DiskLoc(0, 1400), 75},
-                {DiskLoc(0, 1500), 75},
-                {DiskLoc(0, 1600), 75},
-                {DiskLoc(0, 1700), 75},
-                {DiskLoc(0, 1800), 75},
-                {DiskLoc(0, 1900), 75},
-                {DiskLoc(0, 2000), 75},
-                {DiskLoc(0, 2100), 75},
-                {DiskLoc(0, 2200), 75},
-                {DiskLoc(0, 2300), 75},
-                {DiskLoc(0, 2400), 75},
-                {DiskLoc(0, 2500), 75},
-                {DiskLoc(0, 2600), 75},
-                {DiskLoc(0, 2700), 75},
-                {DiskLoc(0, 2800), 75},
-                {DiskLoc(0, 2900), 75},
-                {DiskLoc(0, 3000), 75},
-                {DiskLoc(0, 3100), 75},
-                {DiskLoc(0, 3200), 75},
-                {DiskLoc(0, 3300), 75},
-                {DiskLoc(0, 3400), 75},
-                {DiskLoc(0, 3500), 75},
-                {DiskLoc(0, 3600), 75},
-                {DiskLoc(0, 3700), 75},
-                {DiskLoc(0, 3800), 75},
-                {DiskLoc(0, 3900), 75},
-                {DiskLoc(0, 4000), 75},
-                {DiskLoc(0, 4100), 75},
-                {}
-            };
-            assertStateV1RS(&txn, recs, drecs, &em, md);
-        }
-    }
-
-    /**
-     * Test that we stop looking in a bucket once we see 31 drecs, or look 4-past the first
-     * too-large match, whichever comes first. This is a combination of
-     * InsertLooksForBetterMatchUpTo5Links and InsertLooksForMatchUpTo31Links.
-     *
-     * WARNING: this test depends on magic numbers inside RSV1Simple::_allocFromExistingExtents.
-     */
-    TEST( SimpleRecordStoreV1, InsertLooksForMatchUpTo31LinksEvenIfFoundOversizedFit ) {
-        OperationContextNoop txn;
-        DummyExtentManager em;
-        DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
-        SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
-        {
-            LocAndSize recs[] = {
-                {}
-            };
-            LocAndSize drecs[] = {
-                // This intentionally leaves gaps to keep locs readable.
-                {DiskLoc(0, 1000), 50}, // different bucket
-
-                {DiskLoc(0, 1100), 75}, // 1st too small in correct bucket
-                {DiskLoc(0, 1200), 75},
-                {DiskLoc(0, 1300), 75},
-                {DiskLoc(0, 1400), 75},
-                {DiskLoc(0, 1500), 75},
-                {DiskLoc(0, 1600), 75},
-                {DiskLoc(0, 1700), 75},
-                {DiskLoc(0, 1800), 75},
-                {DiskLoc(0, 1900), 75},
-                {DiskLoc(0, 2000), 75}, // 10th too small
-                {DiskLoc(0, 2100), 75},
-                {DiskLoc(0, 2200), 75},
-                {DiskLoc(0, 2300), 75},
-                {DiskLoc(0, 2400), 75},
-                {DiskLoc(0, 2500), 75},
-                {DiskLoc(0, 2600), 75},
-                {DiskLoc(0, 2700), 75},
-                {DiskLoc(0, 2800), 75},
-                {DiskLoc(0, 2900), 75},
-                {DiskLoc(0, 3000), 75}, // 20th too small
-                {DiskLoc(0, 3100), 75},
-                {DiskLoc(0, 3200), 75},
-                {DiskLoc(0, 3300), 75},
-                {DiskLoc(0, 3400), 75},
-                {DiskLoc(0, 3500), 75},
-                {DiskLoc(0, 3600), 75},
-                {DiskLoc(0, 3700), 75}, // 27th too small
-
-                {DiskLoc(0, 7000), 95}, // 1st insert takes this
-                {DiskLoc(0, 7100), 95}, // 3rd insert takes this
-
-                {DiskLoc(0, 3800), 75},
-                {DiskLoc(0, 3900), 75}, // 29th too small (31st overall)
-
-                {DiskLoc(0, 8000), 80}, // exact match. taken by 2nd insert
-
-                {DiskLoc(0, 9000), 140}, // bigger bucket. Should never get here
-                {}
-            };
-            initializeV1RS(&txn, recs, drecs, &em, md);
-        }
-
-        rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false);
-        rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false);
-        rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false);
-
-        {
-            LocAndSize recs[] = {
-                {DiskLoc(0, 7000), 95}, // 1st insert went here
-                {DiskLoc(0, 8000), 80}, // 2nd here
-                {DiskLoc(0, 7100), 95}, // 3rd here
-                {}
-            };
-            LocAndSize drecs[] = {
-                {DiskLoc(0, 1000), 50},
-                {DiskLoc(0, 1100), 75},
-                {DiskLoc(0, 1200), 75},
-                {DiskLoc(0, 1300), 75},
-                {DiskLoc(0, 1400), 75},
-                {DiskLoc(0, 1500), 75},
-                {DiskLoc(0, 1600), 75},
-                {DiskLoc(0, 1700), 75},
-                {DiskLoc(0, 1800), 75},
-                {DiskLoc(0, 1900), 75},
-                {DiskLoc(0, 2000), 75},
-                {DiskLoc(0, 2100), 75},
-                {DiskLoc(0, 2200), 75},
-                {DiskLoc(0, 2300), 75},
-                {DiskLoc(0, 2400), 75},
-                {DiskLoc(0, 2500), 75},
-                {DiskLoc(0, 2600), 75},
-                {DiskLoc(0, 2700), 75},
-                {DiskLoc(0, 2800), 75},
-                {DiskLoc(0, 2900), 75},
-                {DiskLoc(0, 3000), 75},
-                {DiskLoc(0, 3100), 75},
-                {DiskLoc(0, 3200), 75},
-                {DiskLoc(0, 3300), 75},
-                {DiskLoc(0, 3400), 75},
-                {DiskLoc(0, 3500), 75},
-                {DiskLoc(0, 3600), 75},
-                {DiskLoc(0, 3700), 75},
-                {DiskLoc(0, 3800), 75},
-                {DiskLoc(0, 3900), 75},
-                {DiskLoc(0, 9000), 140},
-                {}
-            };
-            assertStateV1RS(&txn, recs, drecs, &em, md);
-        }
-    }
-
     // -----------------
 
     TEST( SimpleRecordStoreV1, Truncate ) {
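All of the DocWriter-driven tests above follow one pattern, shown here without the assertion scaffolding. This usage sketch assumes the fixtures from record_store_v1_test_help.h (DummyExtentManager, DummyRecordStoreV1MetaData) plus the docForRecordSize() and BsonDocWriter helpers defined in the test file; the namespace string is a placeholder and the snippet is illustrative rather than an actual test:

    static void docWriterUsageSketch() {
        OperationContextNoop txn;
        DummyExtentManager em;
        DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
        SimpleRecordStoreV1 rs( &txn, "test.sketch", md, &em, false );

        // addPadding() == true: insertRecord() quantizes, so a 300-byte record
        // occupies a 512-byte allocation.
        BsonDocWriter padded( docForRecordSize( 300 ), true );
        StatusWith<DiskLoc> quantized = rs.insertRecord( &txn, &padded, false );

        // addPadding() == false: the record keeps its exact, 4-byte aligned size.
        BsonDocWriter unpadded( docForRecordSize( 300 ), false );
        StatusWith<DiskLoc> exact = rs.insertRecord( &txn, &unpadded, false );
    }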
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
index ec19da43b36..50a34ac040d 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
@@ -188,15 +188,6 @@ namespace mongo {
         return _maxCappedDocs;
     }
 
-    double DummyRecordStoreV1MetaData::paddingFactor() const {
-        return _paddingFactor;
-    }
-
-    void DummyRecordStoreV1MetaData::setPaddingFactor( OperationContext* txn,
-                                                       double paddingFactor ) {
-        _paddingFactor = paddingFactor;
-    }
-
     // -----------------------------------------
 
     DummyExtentManager::~DummyExtentManager() {
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
index a04f9b40331..352c91efd16 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
@@ -85,10 +85,6 @@ namespace mongo {
 
         virtual long long maxCappedDocs() const;
 
-        virtual double paddingFactor() const;
-
-        virtual void setPaddingFactor( OperationContext* txn, double paddingFactor );
-
     protected:
 
         DiskLoc _capExtent;
-- 
cgit v1.2.1