author    Mathias Stearn <mathias@10gen.com>    2014-09-17 18:41:18 -0400
committer Mathias Stearn <mathias@10gen.com>    2014-10-17 20:33:13 -0400
commit    08319d688517af8ac88fe7054ea10f1f08c5bf2d (patch)
tree      d54d5079773b1a9d43bedb43276968664b9a0732
parent    7aadbc29c07ee73c62ccfa3696fbd6262fb3d70e (diff)
download  mongo-08319d688517af8ac88fe7054ea10f1f08c5bf2d.tar.gz
SERVER-15273 Only quantize to exact bucket sizes
Prep for new freelist implementation.
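In outline: the padding factor and the usePowerOf2Sizes flag no longer influence allocation. Every padded record allocation is rounded up to an exact size from the deleted-record bucket list, which is what lets the new freelist search below examine only the head of each bucket. A minimal sketch of the sizing rule follows; the bucket list here (powers of two from 32 bytes to 16MB) and the helper name are assumptions inferred from the updated unit tests in this commit, not copied from the source.

    // Sketch only: round a requested allocation up to the first bucket
    // that can hold it. Requests past the largest bucket fall back to
    // "largest bucket + 1MB", matching the new quantizeAllocationSpace().
    static const int kBucketSizes[] = {
        32, 64, 128, 256, 512, 1024, 2048, 4096, 8192,
        16 * 1024, 32 * 1024, 64 * 1024, 128 * 1024, 256 * 1024,
        512 * 1024, 1024 * 1024, 2 * 1024 * 1024, 4 * 1024 * 1024,
        8 * 1024 * 1024, 16 * 1024 * 1024
    };
    static const int kNumBuckets = sizeof(kBucketSizes) / sizeof(kBucketSizes[0]);

    int quantizeToBucket(int minSize) {
        for (int i = 0; i < kNumBuckets; i++) {
            if (kBucketSizes[i] >= minSize)
                return kBucketSizes[i]; // first bucket that fits
        }
        return kBucketSizes[kNumBuckets - 1] + 1024 * 1024; // oversize fallback
    }

For example, quantizeToBucket(33) returns 64 and quantizeToBucket(10001) returns 16*1024, agreeing with the quantizeAllocationSpaceSimple expectations updated below.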
-rw-r--r--  jstests/core/insert1.js  3
-rw-r--r--  jstests/core/profile3.js  4
-rw-r--r--  jstests/core/splitvector.js  12
-rw-r--r--  jstests/mmap_v1/datasize.js  27
-rw-r--r--  jstests/mmap_v1/padding.js  67
-rw-r--r--  jstests/mmap_v1/update.js  3
-rw-r--r--  jstests/mmap_v1/use_power_of_2.js  6
-rw-r--r--  jstests/multiVersion/libs/verify_collection_data.js  6
-rw-r--r--  jstests/sharding/shard_existing.js  9
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp  2
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace_details.h  2
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp  14
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h  3
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp  107
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_base.h  24
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp  2
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp  166
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp  601
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp  9
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h  4
20 files changed, 169 insertions, 902 deletions
diff --git a/jstests/core/insert1.js b/jstests/core/insert1.js
index 745afb0949a..1c0c0f90e87 100644
--- a/jstests/core/insert1.js
+++ b/jstests/core/insert1.js
@@ -38,6 +38,3 @@ assert.writeOK(t.insert(toInsert));
doc = t.findOne({_id:1});
assert.eq(5, doc.a);
assert.eq(count, t.count(), "bad count");
-
-var stats = db.runCommand({ collstats: "insert1" });
-assert( stats.paddingFactor == undefined || stats.paddingFactor == 1.0);
diff --git a/jstests/core/profile3.js b/jstests/core/profile3.js
index baa11efbd58..0e1a731f41d 100644
--- a/jstests/core/profile3.js
+++ b/jstests/core/profile3.js
@@ -26,11 +26,11 @@ try {
db.setProfilingLevel(2);
- db.createCollection(t.getName(), {usePowerOf2Sizes: false});
+ db.createCollection(t.getName());
t.insert( { x : 1 } );
t.findOne( { x : 1 } );
t.find( { x : 1 } ).count();
- t.update( { x : 1 }, {$inc:{a:1}} );
+ t.update( { x : 1 }, {$inc:{a:1}, $set: {big: Array(128).toString()}} );
t.update( { x : 1 }, {$inc:{a:1}} );
t.update( { x : 0 }, {$inc:{a:1}} );
diff --git a/jstests/core/splitvector.js b/jstests/core/splitvector.js
index eb7059bfcc0..9665ded9f18 100644
--- a/jstests/core/splitvector.js
+++ b/jstests/core/splitvector.js
@@ -17,14 +17,18 @@ assertChunkSizes = function ( splitVec , numDocs , maxChunkSize , msg ){
for ( i=0; i<splitVec.length-1; i++) {
min = splitVec[i];
max = splitVec[i+1];
+ var avgObjSize = db.jstests_splitvector.stats().avgObjSize;
size = db.runCommand( { datasize: "test.jstests_splitvector" , min: min , max: max } ).size;
// It is okay for the last chunk to be smaller. A collection's size does not
// need to be exactly a multiple of maxChunkSize.
- if ( i < splitVec.length - 2 )
- assert.close( maxChunkSize , size , "A"+i , -3 );
- else
+ if ( i < splitVec.length - 2 ) {
+ // We are within one object of the correct chunk size.
+ assert.lt( Math.abs(maxChunkSize - size), avgObjSize , "A"+i );
+ }
+ else {
assert.gt( maxChunkSize , size , "A"+i , msg + "b" );
+ }
}
}
@@ -45,10 +49,8 @@ var assertFieldNamesMatch = function( splitPoint , keyPattern ){
}
}
-// turn off powerOf2, this test checks regular allocation
var resetCollection = function() {
f.drop();
- db.createCollection(f.getName(), {usePowerOf2Sizes: false});
}
// -------------------------
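The looser assertion encodes a boundary argument: splitVector cuts chunks on document boundaries, so a full chunk can differ from maxChunkSize by at most the one document that straddles the cut, i.e. |maxChunkSize - size| < avgObjSize. This replaces the old assert.close() with its fixed decimal tolerance, which no longer holds once record sizes are quantized to exact buckets.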
diff --git a/jstests/mmap_v1/datasize.js b/jstests/mmap_v1/datasize.js
index 13e9f11bf0c..85c32413b61 100644
--- a/jstests/mmap_v1/datasize.js
+++ b/jstests/mmap_v1/datasize.js
@@ -2,34 +2,33 @@
f = db.jstests_datasize;
f.drop();
-// this test requires usePowerOf2Sizes to be off
-db.createCollection( f.getName(), { usePowerOf2Sizes: false } );
-assert.eq(0, f.stats().userFlags);
-
assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.save( {qq:'c'} );
-assert.eq( 32, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
+printjson(f.stats());
+assert.eq( 48, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.save( {qq:'fg'} );
-assert.eq( 68, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
+printjson(f.stats());
+assert.eq( 96, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.drop();
-db.createCollection( f.getName(), { usePowerOf2Sizes: false} );
f.ensureIndex( {qq:1} );
assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.save( {qq:'c'} );
-assert.eq( 32, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
+printjson(f.stats());
+assert.eq( 48, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.save( {qq:'fg'} );
-assert.eq( 68, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
+printjson(f.stats());
+assert.eq( 96, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}} ).ok );
-assert.eq( 68, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'z' }} ).size );
-assert.eq( 32, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'d' }} ).size );
-assert.eq( 32, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'d' }, keyPattern:{qq:1}} ).size );
-assert.eq( 36, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'d'}, max:{qq:'z' }, keyPattern:{qq:1}} ).size );
+assert.eq( 96, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'z' }} ).size );
+assert.eq( 48, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'d' }} ).size );
+assert.eq( 48, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'d' }, keyPattern:{qq:1}} ).size );
+assert.eq( 48, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'d'}, max:{qq:'z' }, keyPattern:{qq:1}} ).size );
assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'c'}, max:{qq:'c' }} ).size );
-assert.eq( 32, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'c'}, max:{qq:'d' }} ).size );
+assert.eq( 48, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'c'}, max:{qq:'d' }} ).size );
assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'d' }, keyPattern:{a:1}} ).ok );
diff --git a/jstests/mmap_v1/padding.js b/jstests/mmap_v1/padding.js
deleted file mode 100644
index 6bd673e57ed..00000000000
--- a/jstests/mmap_v1/padding.js
+++ /dev/null
@@ -1,67 +0,0 @@
-p = db.getCollection("padding");
-p.drop();
-
-// this test requires usePowerOf2Sizes to be off
-db.createCollection( p.getName(), { "usePowerOf2Sizes" : false } );
-assert.eq(0, p.stats().userFlags);
-
-for (var i = 0; i < 1000; i++) {
- p.insert({ x: 1, y: "aaaaaaaaaaaaaaa" });
-}
-
-assert.eq(p.stats().paddingFactor, 1, "Padding Not 1");
-
-for (var i = 0; i < 1000; i++) {
- var x = p.findOne();
- x.y = x.y + "aaaaaaaaaaaaaaaa";
- p.update({}, x);
- if (i % 100 == 0)
-
- print(p.stats().paddingFactor);
-}
-
-assert.gt(p.stats().paddingFactor, 1.9, "Padding not > 1.9");
-
-// this should make it go down
-for (var i = 0; i < 1000; i++) {
- p.update({}, { $inc: { x: 1} });
- if (i % 100 == 0)
- print(p.stats().paddingFactor);
-}
-assert.lt(p.stats().paddingFactor, 1.7, "Padding not < 1.7");
-
-for (var i = 0; i < 1000; i++) {
- if (i % 2 == 0) {
- p.update({}, { $inc: { x: 1} });
- }
- else {
- var x = p.findOne();
- x.y = x.y + "aaaaaaaaaaaaaaaa";
- p.update({}, x);
- }
- if( i % 100 == 0 )
- print(p.stats().paddingFactor);
-}
-var ps = p.stats().paddingFactor;
-assert.gt(ps, 1.7, "Padding not greater than 1.7");
-assert.lt(ps, 1.9, "Padding not less than 1.9");
-
-// 50/50 inserts and nonfitting updates
-for (var i = 0; i < 1000; i++) {
- if (i % 2 == 0) {
- p.insert({});
- }
- else {
- var x = p.findOne();
- x.y = x.y + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
- p.update({}, x);
- }
- if (i % 100 == 0)
- print(p.stats().paddingFactor);
-}
-
-// should have trended somewhat higher over the above.
-// speed of increase would be higher with more indexes.
-assert.gt(p.stats().paddingFactor, ps + 0.02 ,
- "padding factor not greater than value (+.02), ps: " + ps + " now: " + p.stats().paddingFactor);
-p.drop();
diff --git a/jstests/mmap_v1/update.js b/jstests/mmap_v1/update.js
index 37bf6378c64..607f46b4155 100644
--- a/jstests/mmap_v1/update.js
+++ b/jstests/mmap_v1/update.js
@@ -33,8 +33,5 @@ assert(stats.count == iterations);
assert(stats.size < 140433012 * 5 && stats.size > 1000000);
assert(stats.numExtents < 20);
assert(stats.nindexes == 1);
-var pf = stats.paddingFactor;
-print("update.js padding factor: " + pf);
-assert(pf > 1.7 && pf <= 2);
asdf.drop();
diff --git a/jstests/mmap_v1/use_power_of_2.js b/jstests/mmap_v1/use_power_of_2.js
index 8eb24233cf1..26977034763 100644
--- a/jstests/mmap_v1/use_power_of_2.js
+++ b/jstests/mmap_v1/use_power_of_2.js
@@ -1,5 +1,7 @@
/*
* This test ensures that the usePowerOf2 user flag effectively reuses space.
+ *
+ * As of SERVER-15273, usePowerOf2 is silently ignored, so the behavior is the same regardless.
*/
// prepare a doc of 14K
@@ -32,7 +34,7 @@ t.drop();
db.createCollection(collName);
var res = db.runCommand( { "collMod" : collName , "usePowerOf2Sizes" : false } );
assert( res.ok, "collMod failed" );
-checkStorageSize(15344, false); // 15344 = 14369 (bsonsize) + overhead
+checkStorageSize(16*1023, true); // allocations are now always quantized
t.drop();
db.createCollection(collName);
@@ -44,7 +46,7 @@ checkStorageSize(16 * 1023, true); // power of 2
// Create collection with flag
t.drop();
db.runCommand({"create" : collName, "flags" : 0 });
-checkStorageSize(15344, false);
+checkStorageSize(16*1023, true);
t.drop();
db.runCommand({"create" : collName, "flags" : 1 });
diff --git a/jstests/multiVersion/libs/verify_collection_data.js b/jstests/multiVersion/libs/verify_collection_data.js
index 98d5ec592af..dfd07c453ea 100644
--- a/jstests/multiVersion/libs/verify_collection_data.js
+++ b/jstests/multiVersion/libs/verify_collection_data.js
@@ -115,6 +115,12 @@ function CollectionDataValidator() {
delete collectionStats.systemFlags;
delete newCollectionStats.systemFlags;
+ // As of 2.7.7, paddingFactor is no longer used; paddingFactorNote was introduced instead.
+ delete collectionStats.paddingFactor;
+ delete collectionStats.paddingFactorNote;
+ delete newCollectionStats.paddingFactor;
+ delete newCollectionStats.paddingFactorNote;
+
// Delete keys that appear just because we shard
delete newCollectionStats["primary"];
delete newCollectionStats["sharded"];
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index 3f4ed69832c..ab03f4e770f 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -12,22 +12,23 @@ var docSize = Object.bsonsize({ _id: numDocs, s: bigString });
var totalSize = docSize * numDocs;
print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSize);
-// turn off powerOf2Sizes as this tests regular allocation
-db.createCollection('data', {usePowerOf2Sizes: false});
-
var bulk = db.data.initializeUnorderedBulkOp();
for (i=0; i<numDocs; i++) {
bulk.insert({_id: i, s: bigString});
}
assert.writeOK(bulk.execute());
+var avgObjSize = db.data.stats().avgObjSize;
+var dataSize = db.data.stats().size;
+assert.lt(totalSize, dataSize);
+
s.adminCommand( { enablesharding : "test" } );
res = s.adminCommand( { shardcollection : "test.data" , key : { _id : 1 } } );
printjson(res);
// number of chunks should be approx equal to the total data size / half the chunk size
var numChunks = s.config.chunks.find().itcount();
-var guess = Math.ceil(totalSize / (512 * 1024));
+var guess = Math.ceil(dataSize / (512*1024 + avgObjSize));
assert( Math.abs( numChunks - guess ) < 2, "not right number of chunks" );
s.stop();
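The revised guess keeps the comment's reasoning but measures what is actually on disk: with bucket quantization the collection's reported size exceeds the raw BSON total (hence the new assert.lt), and since a split triggers once a chunk passes half the chunk size (512KB here) plus the document that crosses that line, the expected chunk count is roughly dataSize / (512*1024 + avgObjSize), accurate to within a chunk or two.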
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
index 5a5364403c4..970d6e29c71 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
@@ -66,7 +66,7 @@ namespace mongo {
nIndexes = 0;
isCapped = capped;
maxDocsInCapped = 0x7fffffff; // no limit (value is for pre-v2.3.2 compatibility)
- paddingFactor = 1.0;
+ paddingFactorOldDoNotUse = 1.0;
systemFlagsOldDoNotUse = 0;
userFlags = 0;
capFirstNewRecord = DiskLoc();
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
index 48ad001b90b..5afcd715128 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
@@ -92,7 +92,7 @@ namespace mongo {
int maxDocsInCapped; // max # of objects for a capped table, -1 for inf.
- double paddingFactor; // 1.0 = no padding.
+ double paddingFactorOldDoNotUse;
// ofs 368 (16)
int systemFlagsOldDoNotUse; // things that the system sets/cares about
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
index 54d74ae1d2e..b78ad7bef59 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
@@ -175,20 +175,6 @@ namespace mongo {
return _details->maxDocsInCapped;
}
- double NamespaceDetailsRSV1MetaData::paddingFactor() const {
- return _details->paddingFactor;
- }
-
- void NamespaceDetailsRSV1MetaData::setPaddingFactor( OperationContext* txn, double paddingFactor ) {
- if ( paddingFactor == _details->paddingFactor )
- return;
-
- if ( _details->isCapped )
- return;
-
- *txn->recoveryUnit()->writing(&_details->paddingFactor) = paddingFactor;
- }
-
void NamespaceDetailsRSV1MetaData::_syncUserFlags( OperationContext* txn ) {
if ( !_namespaceRecordStore )
return;
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
index 9f933d003e5..609691ec5ed 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
@@ -96,9 +96,6 @@ namespace mongo {
virtual long long maxCappedDocs() const;
- virtual double paddingFactor() const;
- virtual void setPaddingFactor( OperationContext* txn, double paddingFactor );
-
private:
void _syncUserFlags( OperationContext* txn );
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index 0f23a5e521e..a7ea59aea52 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -241,8 +241,8 @@ namespace mongo {
"record has to be >= 4 bytes" );
}
int lenWHdr = docSize + Record::HeaderSize;
- if ( doc->addPadding() )
- lenWHdr = getRecordAllocationSize( lenWHdr );
+ if (doc->addPadding() && !isCapped())
+ lenWHdr = quantizeAllocationSpace( lenWHdr );
StatusWith<DiskLoc> loc = allocRecord( txn, lenWHdr, enforceQuota );
if ( !loc.isOK() )
@@ -258,8 +258,6 @@ namespace mongo {
_details->incrementStats( txn, r->netLength(), 1 );
- _paddingFits( txn );
-
return loc;
}
@@ -273,11 +271,7 @@ namespace mongo {
"record has to be >= 4 bytes" );
}
- StatusWith<DiskLoc> status = _insertRecord( txn, data, len, enforceQuota );
- if ( status.isOK() )
- _paddingFits( txn );
-
- return status;
+ return _insertRecord( txn, data, len, enforceQuota );
}
StatusWith<DiskLoc> RecordStoreV1Base::_insertRecord( OperationContext* txn,
@@ -285,7 +279,9 @@ namespace mongo {
int len,
bool enforceQuota ) {
- int lenWHdr = getRecordAllocationSize( len + Record::HeaderSize );
+ int lenWHdr = len + Record::HeaderSize;
+ if (!isCapped())
+ lenWHdr = quantizeAllocationSpace( lenWHdr );
fassert( 17208, lenWHdr >= ( len + Record::HeaderSize ) );
StatusWith<DiskLoc> loc = allocRecord( txn, lenWHdr, enforceQuota );
@@ -315,7 +311,6 @@ namespace mongo {
Record* oldRecord = recordFor( oldLocation );
if ( oldRecord->netLength() >= dataSize ) {
// we fit
- _paddingFits( txn );
memcpy( txn->recoveryUnit()->writingPtr( oldRecord->data(), dataSize ), data, dataSize );
return StatusWith<DiskLoc>( oldLocation );
}
@@ -327,8 +322,6 @@ namespace mongo {
// we have to move
- _paddingTooSmall( txn );
-
StatusWith<DiskLoc> newLocation = _insertRecord( txn, data, dataSize, enforceQuota );
if ( !newLocation.isOK() )
return newLocation;
@@ -354,8 +347,6 @@ namespace mongo {
const RecordData& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages ) {
- _paddingFits( txn );
-
Record* rec = recordFor( loc );
char* root = rec->data();
@@ -519,7 +510,6 @@ namespace mongo {
output->appendNumber("datasize", _details->dataSize());
output->appendNumber("nrecords", _details->numRecords());
output->appendNumber("lastExtentSize", _details->lastExtentSize(txn));
- output->appendNumber("padding", _details->paddingFactor());
if ( _details->firstExtent(txn).isNull() )
output->append( "firstExtent", "null" );
@@ -647,7 +637,6 @@ namespace mongo {
int n = 0;
int nInvalid = 0;
long long nQuantizedSize = 0;
- long long nPowerOf2QuantizedSize = 0;
long long len = 0;
long long nlen = 0;
long long bsonLen = 0;
@@ -681,13 +670,6 @@ namespace mongo {
++nQuantizedSize;
}
- if ( r->lengthWithHeaders() ==
- quantizePowerOf2AllocationSpace( r->lengthWithHeaders() ) ) {
- // Count the number of records having a size consistent with the
- // quantizePowerOf2AllocationSpace quantization implementation.
- ++nPowerOf2QuantizedSize;
- }
-
if (full){
size_t dataSize = 0;
const Status status = adaptor->validate( r->toRecordData(), &dataSize );
@@ -720,7 +702,6 @@ namespace mongo {
}
output->appendNumber("nQuantizedSize", nQuantizedSize);
- output->appendNumber("nPowerOf2QuantizedSize", nPowerOf2QuantizedSize);
output->appendNumber("bytesWithHeaders", len);
output->appendNumber("bytesWithoutHeaders", nlen);
@@ -804,7 +785,9 @@ namespace mongo {
BSONObjBuilder* result,
double scale ) const {
result->append( "lastExtentSize", _details->lastExtentSize(txn) / scale );
- result->append( "paddingFactor", _details->paddingFactor() );
+ result->append( "paddingFactor", 1.0 ); // hard coded
+ result->append( "paddingFactorNote", "paddingFactor is unused and unmaintained in 2.8. It "
+ "remains hard coded to 1.0 for compatibility only." );
result->append( "userFlags", _details->userFlags() );
if ( isCapped() ) {
@@ -863,22 +846,6 @@ namespace mongo {
return Status::OK();
}
- int RecordStoreV1Base::getRecordAllocationSize( int minRecordSize ) const {
-
- if ( isCapped() )
- return minRecordSize;
-
- invariant( _details->paddingFactor() >= 1 );
-
- if ( _details->isUserFlagSet( Flag_UsePowerOf2Sizes ) ) {
- // quantize to the nearest bucketSize (or nearest 1mb boundary for large sizes).
- return quantizePowerOf2AllocationSpace(minRecordSize);
- }
-
- // adjust for padding factor
- return static_cast<int>(minRecordSize * _details->paddingFactor());
- }
-
DiskLoc RecordStoreV1Base::IntraExtentIterator::getNext() {
if (_curr.isNull())
return DiskLoc();
@@ -896,73 +863,31 @@ namespace mongo {
}
}
- /* @return the size for an allocated record quantized to 1/16th of the BucketSize
- @param allocSize requested size to allocate
- */
int RecordStoreV1Base::quantizeAllocationSpace(int allocSize) {
- const int bucketIdx = bucket(allocSize);
- int bucketSize = bucketSizes[bucketIdx];
- int quantizeUnit = bucketSize / 16;
- if (allocSize >= (1 << 22)) // 4mb
- // all allocatons >= 4mb result in 4mb/16 quantization units, even if >= 8mb. idea is
- // to reduce quantization overhead of large records at the cost of increasing the
- // DeletedRecord size distribution in the largest bucket by factor of 4.
- quantizeUnit = (1 << 18); // 256k
- if (allocSize % quantizeUnit == 0)
- // size is already quantized
- return allocSize;
- const int quantizedSpace = (allocSize | (quantizeUnit - 1)) + 1;
- fassert(16484, quantizedSpace >= allocSize);
- return quantizedSpace;
- }
-
- int RecordStoreV1Base::quantizePowerOf2AllocationSpace(int allocSize) {
- for ( int i = 0; i < MaxBucket; i++ ) { // skips the largest (16MB) bucket
+ for ( int i = 0; i < Buckets; i++ ) {
if ( bucketSizes[i] >= allocSize ) {
// Return the size of the first bucket sized >= the requested size.
return bucketSizes[i];
}
}
- // if we get here, it means we're allocating more than 4mb, so round up
- // to the nearest megabyte >= allocSize
- const int MB = 1024*1024;
- invariant(allocSize > 4*MB);
- return (allocSize + (MB - 1)) & ~(MB - 1); // round up to MB alignment
+ // TODO make a specific bucket large enough to hold all documents rather than doing this.
+ invariant(allocSize < bucketSizes[MaxBucket] + 1024*1024);
+ return bucketSizes[MaxBucket] + 1024*1024;
}
int RecordStoreV1Base::bucket(int size) {
for ( int i = 0; i < Buckets; i++ ) {
if ( bucketSizes[i] > size ) {
- // Return the first bucket sized _larger_ than the requested size.
+ // Return the first bucket sized _larger_ than the requested size. This is important
+ // since we want all records in a bucket to be >= the quantized size; therefore the
+ // quantized size must be the smallest allowed record per bucket.
return i;
}
}
return MaxBucket;
}
- void RecordStoreV1Base::_paddingFits( OperationContext* txn ) {
- MONGO_SOMETIMES(sometimes, 4) { // do this on a sampled basis to journal less
- double x = max(1.0, _details->paddingFactor() - 0.001 );
- _details->setPaddingFactor( txn, x );
- }
- }
-
- void RecordStoreV1Base::_paddingTooSmall( OperationContext* txn ) {
- MONGO_SOMETIMES(sometimes, 4) { // do this on a sampled basis to journal less
- /* the more indexes we have, the higher the cost of a move. so we take that into
- account herein. note on a move that insert() calls paddingFits(), thus
- here for example with no inserts and nIndexes = 1 we have
- .001*4-.001 or a 3:1 ratio to non moves -> 75% nonmoves. insert heavy
- can pushes this down considerably. further tweaking will be a good idea but
- this should be an adequate starting point.
- */
- double N = 4; // magic
- double x = min(2.0,_details->paddingFactor() + (0.001 * N));
- _details->setPaddingFactor( txn, x );
- }
- }
-
Status RecordStoreV1Base::setCustomOption( OperationContext* txn,
const BSONElement& option,
BSONObjBuilder* info ) {
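The net effect of the two functions above: quantizeAllocationSpace() now always returns an exact bucket value (per the updated unit tests later in this commit, 33 -> 64, 1000 -> 1024, 10001 -> 16*1024, and one byte past the 16MB top bucket -> 16MB + 1MB via the oversize fallback), while bucket() deliberately returns the first bucket strictly larger than the given size so that every deleted record stored under a bucket can satisfy any request quantized to it.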
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
index 5675027a37f..8c1141b0ec4 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
@@ -91,10 +91,6 @@ namespace mongo {
virtual long long maxCappedDocs() const = 0;
- virtual double paddingFactor() const = 0;
-
- virtual void setPaddingFactor( OperationContext* txn, double paddingFactor ) = 0;
-
};
class RecordStoreV1Base : public RecordStore {
@@ -177,13 +173,6 @@ namespace mongo {
const RecordStoreV1MetaData* details() const { return _details.get(); }
- /**
- * @return the actual size to create
- * will be >= oldRecordSize
- * based on padding and any other flags
- */
- int getRecordAllocationSize( int minRecordSize ) const;
-
DiskLoc getExtentLocForRecord( OperationContext* txn, const DiskLoc& loc ) const;
DiskLoc getNextRecord( OperationContext* txn, const DiskLoc& loc ) const;
@@ -192,16 +181,10 @@ namespace mongo {
DiskLoc getNextRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const;
DiskLoc getPrevRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const;
- /* @return the size for an allocated record quantized to 1/16th of the BucketSize.
- @param allocSize requested size to allocate
- The returned size will be greater than or equal to 'allocSize'.
- */
- static int quantizeAllocationSpace(int allocSize);
-
/**
- * Quantize 'allocSize' to the nearest bucketSize (or nearest 1mb boundary for large sizes).
+ * Quantize 'minSize' to the nearest allocation size.
*/
- static int quantizePowerOf2AllocationSpace(int allocSize);
+ static int quantizeAllocationSpace(int minSize);
/* return which "deleted bucket" for this size object */
static int bucket(int size);
@@ -249,9 +232,6 @@ namespace mongo {
*/
void _addRecordToRecListInExtent(OperationContext* txn, Record* r, DiskLoc loc);
- void _paddingTooSmall( OperationContext* txn );
- void _paddingFits( OperationContext* txn );
-
/**
* internal
* doesn't check inputs or change padding
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
index 725ecd1a6af..8d6a0672a43 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
@@ -229,7 +229,7 @@ namespace mongo {
// nIndexes preserve 0
// capped preserve true
// max preserve
- _details->setPaddingFactor( txn, 1.0 );
+ // paddingFactor is unused
_details->setCapFirstNewRecord( txn, DiskLoc().setInvalid() );
setLastDelRecLastExtent( txn, DiskLoc().setInvalid() );
// dataFileVersion preserve
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
index aca3f99df4a..4b8a51e9e87 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
@@ -54,6 +54,7 @@ namespace mongo {
static Counter64 freelistBucketExhausted;
static Counter64 freelistIterations;
+ // TODO figure out what to do about these.
static ServerStatusMetricField<Counter64> dFreelist1( "storage.freelist.search.requests",
&freelistAllocs );
@@ -72,157 +73,59 @@ namespace mongo {
invariant( !details->isCapped() );
_normalCollection = NamespaceString::normal( ns );
- if ( _details->paddingFactor() == 0 ) {
- warning() << "implicit updgrade of paddingFactor of very old collection" << endl;
- WriteUnitOfWork wunit(txn);
- _details->setPaddingFactor(txn, 1.0);
- wunit.commit();
- }
-
}
SimpleRecordStoreV1::~SimpleRecordStoreV1() {
}
DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents( OperationContext* txn,
- int lenToAlloc ) {
+ int lenToAllocRaw ) {
// align size up to a multiple of 4
- lenToAlloc = (lenToAlloc + (4-1)) & ~(4-1);
+ const int lenToAlloc = (lenToAllocRaw + (4-1)) & ~(4-1);
freelistAllocs.increment();
DiskLoc loc;
+ DeletedRecord* dr = NULL;
{
- DiskLoc *prev = 0;
- DiskLoc *bestprev = 0;
- DiskLoc bestmatch;
- int bestmatchlen = INT_MAX; // sentinel meaning we haven't found a record big enough
- int b = bucket(lenToAlloc);
- DiskLoc cur = _details->deletedListEntry(b);
-
- int extra = 5; // look for a better fit, a little.
- int chain = 0;
- while ( 1 ) {
- { // defensive check
- int fileNumber = cur.a();
- int fileOffset = cur.getOfs();
- if (fileNumber < -1 || fileNumber >= 100000 || fileOffset < 0) {
- StringBuilder sb;
- sb << "Deleted record list corrupted in collection " << _ns
- << ", bucket " << b
- << ", link number " << chain
- << ", invalid link is " << cur.toString()
- << ", throwing Fatal Assertion";
- log() << sb.str() << endl;
- fassertFailed(16469);
- }
- }
- if ( cur.isNull() ) {
- // move to next bucket. if we were doing "extra", just break
- if ( bestmatchlen < INT_MAX )
- break;
-
- if ( chain > 0 ) {
- // if we looked at things in the right bucket, but they were not suitable
- freelistBucketExhausted.increment();
- }
- b++;
- if ( b > MaxBucket ) {
- // out of space. alloc a new extent.
- freelistIterations.increment( 1 + chain );
- return DiskLoc();
- }
- cur = _details->deletedListEntry(b);
- prev = 0;
- continue;
- }
- DeletedRecord *r = drec(cur);
- if ( r->lengthWithHeaders() >= lenToAlloc &&
- r->lengthWithHeaders() < bestmatchlen ) {
- bestmatchlen = r->lengthWithHeaders();
- bestmatch = cur;
- bestprev = prev;
- if (r->lengthWithHeaders() == lenToAlloc)
- // exact match, stop searching
- break;
- }
- if ( bestmatchlen < INT_MAX && --extra <= 0 )
+ int myBucket;
+ for (myBucket = bucket(lenToAlloc); myBucket < Buckets; myBucket++) {
+ // Only look at the first entry in each bucket. This works because we are either
+ // quantizing or allocating fixed-size blocks.
+ const DiskLoc head = _details->deletedListEntry(myBucket);
+ if (head.isNull()) continue;
+ DeletedRecord* const candidate = drec(head);
+ if (candidate->lengthWithHeaders() >= lenToAlloc) {
+ loc = head;
+ dr = candidate;
break;
- if ( ++chain > 30 && b <= MaxBucket ) {
- // too slow, force move to next bucket to grab a big chunk
- //b++;
- freelistIterations.increment( chain );
- chain = 0;
- cur.Null();
- }
- else {
- cur = r->nextDeleted();
- prev = &r->nextDeleted();
}
}
- // unlink ourself from the deleted list
- DeletedRecord *bmr = drec(bestmatch);
- if ( bestprev ) {
- *txn->recoveryUnit()->writing(bestprev) = bmr->nextDeleted();
- }
- else {
- // should be the front of a free-list
- int myBucket = bucket(bmr->lengthWithHeaders());
- invariant( _details->deletedListEntry(myBucket) == bestmatch );
- _details->setDeletedListEntry(txn, myBucket, bmr->nextDeleted());
- }
- *txn->recoveryUnit()->writing(&bmr->nextDeleted()) = DiskLoc().setInvalid(); // defensive.
- invariant(bmr->extentOfs() < bestmatch.getOfs());
+ if (!dr)
+ return DiskLoc(); // no space
- freelistIterations.increment( 1 + chain );
- loc = bestmatch;
+ // Unlink ourself from the deleted list
+ _details->setDeletedListEntry(txn, myBucket, dr->nextDeleted());
+ *txn->recoveryUnit()->writing(&dr->nextDeleted()) = DiskLoc().setInvalid(); // defensive
}
- if ( loc.isNull() )
- return loc;
-
- // determine if we should chop up
+ invariant( dr->extentOfs() < loc.getOfs() );
- DeletedRecord *r = drec(loc);
+ // Split the deleted record if it has at least as much left over space as our smallest
+ // allocation size. Otherwise, just take the whole DeletedRecord.
+ const int remainingLength = dr->lengthWithHeaders() - lenToAlloc;
+ if (remainingLength >= bucketSizes[0]) {
+ txn->recoveryUnit()->writingInt(dr->lengthWithHeaders()) = lenToAlloc;
+ const DiskLoc newDelLoc = DiskLoc(loc.a(), loc.getOfs() + lenToAlloc);
+ DeletedRecord* newDel = txn->recoveryUnit()->writing(drec(newDelLoc));
+ newDel->extentOfs() = dr->extentOfs();
+ newDel->lengthWithHeaders() = remainingLength;
+ newDel->nextDeleted().Null();
- /* note we want to grab from the front so our next pointers on disk tend
- to go in a forward direction which is important for performance. */
- int regionlen = r->lengthWithHeaders();
- invariant( r->extentOfs() < loc.getOfs() );
-
- int left = regionlen - lenToAlloc;
- if ( left < 24 || left < (lenToAlloc / 8) ) {
- // you get the whole thing.
- return loc;
+ addDeletedRec(txn, newDelLoc);
}
- // don't quantize:
- // - $ collections (indexes) as we already have those aligned the way we want SERVER-8425
- if ( _normalCollection ) {
- // we quantize here so that it only impacts newly sized records
- // this prevents oddities with older records and space re-use SERVER-8435
- lenToAlloc = std::min( r->lengthWithHeaders(),
- quantizeAllocationSpace( lenToAlloc ) );
- left = regionlen - lenToAlloc;
-
- if ( left < 24 ) {
- // you get the whole thing.
- return loc;
- }
- }
-
- /* split off some for further use. */
- txn->recoveryUnit()->writingInt(r->lengthWithHeaders()) = lenToAlloc;
- DiskLoc newDelLoc = loc;
- newDelLoc.inc(lenToAlloc);
- DeletedRecord* newDel = drec(newDelLoc);
- DeletedRecord* newDelW = txn->recoveryUnit()->writing(newDel);
- newDelW->extentOfs() = r->extentOfs();
- newDelW->lengthWithHeaders() = left;
- newDelW->nextDeleted().Null();
-
- addDeletedRec( txn, newDelLoc );
return loc;
}
@@ -426,11 +329,8 @@ namespace mongo {
const unsigned minAllocationSize = rawDataSize + Record::HeaderSize;
unsigned allocationSize = minAllocationSize;
switch( compactOptions->paddingMode ) {
- case CompactOptions::NONE: // no padding, unless using powerOf2Sizes
- if ( _details->isUserFlagSet(Flag_UsePowerOf2Sizes) )
- allocationSize = quantizePowerOf2AllocationSpace(minAllocationSize);
- else
- allocationSize = minAllocationSize;
+ case CompactOptions::NONE: // default padding
+ allocationSize = quantizeAllocationSpace(minAllocationSize);
break;
case CompactOptions::PRESERVE: // keep original padding
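The rewritten _allocFromExistingExtents() collapses the old best-fit scan (with its 5-link "extra" search and 31-link bailout) into: check only the head of each freelist bucket at or above the request's bucket, take the first head that is large enough, and split off the tail whenever the remainder can hold at least the smallest bucket. A condensed, illustrative sketch under those assumptions; the types and names are simplified stand-ins, since the real code walks DiskLoc/DeletedRecord and journals every write through the recovery unit:

    #include <cstddef>

    struct FreeBlock {
        int len;         // stand-in for lengthWithHeaders()
        FreeBlock* next; // stand-in for nextDeleted()
    };

    static const int kBucketSizes[] = { 32, 64, 128, 256 }; // demo list
    static const int kBuckets = sizeof(kBucketSizes) / sizeof(int);

    // Mirrors bucket(): first bucket sized strictly larger than 'size'.
    static int bucketFor(int size) {
        for (int i = 0; i < kBuckets; i++)
            if (kBucketSizes[i] > size)
                return i;
        return kBuckets - 1;
    }

    static void addToFreelist(FreeBlock* heads[], FreeBlock* blk) {
        int b = bucketFor(blk->len);
        blk->next = heads[b];
        heads[b] = blk;
    }

    FreeBlock* allocFromFreelists(FreeBlock* heads[], int lenToAlloc) {
        for (int b = bucketFor(lenToAlloc); b < kBuckets; b++) {
            FreeBlock* head = heads[b];
            if (head == NULL || head->len < lenToAlloc)
                continue;              // only the head is ever examined
            heads[b] = head->next;     // unlink from its bucket
            int remaining = head->len - lenToAlloc;
            if (remaining >= kBucketSizes[0]) {
                // Split: keep the front for the record, return the tail.
                FreeBlock* tail = reinterpret_cast<FreeBlock*>(
                    reinterpret_cast<char*>(head) + lenToAlloc);
                tail->len = remaining;
                head->len = lenToAlloc;
                addToFreelist(heads, tail);
            }
            return head;
        }
        return NULL; // no space: caller allocates a new extent
    }

The head-only check is sound because, as the in-tree comment says, allocations are either quantized or fixed-size, so blocks within a bucket are interchangeable; the explicit length test on the head remains to guard the unquantized DocWriter path (addPadding() == false), which can still leave odd sizes on the freelist.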
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
index 150611e1201..6b5dd87bf1d 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
@@ -40,58 +40,24 @@ using namespace mongo;
namespace {
- // Provides data to be inserted. Must be large enough for largest possible record.
- // Should be in BSS so unused portions should be free.
- char zeros[20*1024*1024] = {};
-
TEST( SimpleRecordStoreV1, quantizeAllocationSpaceSimple ) {
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(33), 36);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1000), 1024);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(10001), 10240);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(100000), 106496);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1000001), 1048576);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(10000000), 10223616);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(33), 64);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1000), 1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(10001), 16*1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(100000), 128*1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1000001), 1024*1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(10000000), 16*1024*1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(16*1024*1024), 16*1024*1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(16*1024*1024 + 1), 17*1024*1024);
}
TEST( SimpleRecordStoreV1, quantizeAllocationMinMaxBound ) {
const int maxSize = 16 * 1024 * 1024;
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1), 2);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1), 32);
ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(maxSize), maxSize);
}
/**
- * Test Quantize record allocation on every boundary, as well as boundary-1
- * @see NamespaceDetails::quantizeAllocationSpace()
- */
- TEST( SimpleRecordStoreV1, quantizeAllocationBoundary ) {
- for (int iBucket = 0; iBucket <= RecordStoreV1Base::MaxBucket; ++iBucket) {
- // for each bucket in range [min, max)
- const int bucketSize = RecordStoreV1Base::bucketSizes[iBucket];
- const int prevBucketSize =
- (iBucket - 1 >= 0) ? RecordStoreV1Base::bucketSizes[iBucket - 1] : 0;
- const int intervalSize = bucketSize / 16;
- for (int iBoundary = prevBucketSize;
- iBoundary < bucketSize;
- iBoundary += intervalSize) {
- // for each quantization boundary within the bucket
- for (int iSize = iBoundary - 1; iSize <= iBoundary; ++iSize) {
- // test the quantization boundary - 1, and the boundary itself
- const int quantized =
- RecordStoreV1Base::quantizeAllocationSpace(iSize);
- // assert quantized size is greater than or equal to requested size
- ASSERT(quantized >= iSize);
- // assert quantized size is within one quantization interval of
- // the requested size
- ASSERT(quantized - iSize <= intervalSize);
- // assert quantization is an idempotent operation
- ASSERT(quantized ==
- RecordStoreV1Base::quantizeAllocationSpace(quantized));
- }
- }
- }
- }
-
- /**
* For buckets up to 4MB, powerOf2 allocation should round up to the next power of 2. It should
* return the input unmodified if it is already a power of 2.
*/
@@ -103,45 +69,20 @@ namespace {
// size - 1 is quantized to size.
ASSERT_EQUALS( size,
- RecordStoreV1Base::quantizePowerOf2AllocationSpace( size - 1 ) );
+ RecordStoreV1Base::quantizeAllocationSpace( size - 1 ) );
// size is quantized to size.
ASSERT_EQUALS( size,
- RecordStoreV1Base::quantizePowerOf2AllocationSpace( size ) );
+ RecordStoreV1Base::quantizeAllocationSpace( size ) );
// size + 1 is quantized to nextSize (unless > 4MB which is covered by next test)
if (size < 4*1024*1024) {
ASSERT_EQUALS( nextSize,
- RecordStoreV1Base::quantizePowerOf2AllocationSpace( size + 1 ) );
+ RecordStoreV1Base::quantizeAllocationSpace( size + 1 ) );
}
}
}
- /**
- * Within the largest bucket, quantizePowerOf2AllocationSpace quantizes to the nearest
- * megabyte boundary.
- */
- TEST( SimpleRecordStoreV1, SimpleRecordLargePowerOf2ToMegabyteBoundary ) {
- // Iterate iSize over all 1mb boundaries from the size of the next to largest bucket
- // to the size of the largest bucket + 1mb.
- for( int iSize = RecordStoreV1Base::bucketSizes[ RecordStoreV1Base::MaxBucket - 1 ];
- iSize <= RecordStoreV1Base::bucketSizes[ RecordStoreV1Base::MaxBucket ] + 0x100000;
- iSize += 0x100000 ) {
-
- // iSize - 1 is quantized to iSize.
- ASSERT_EQUALS( iSize,
- RecordStoreV1Base::quantizePowerOf2AllocationSpace( iSize - 1 ) );
-
- // iSize is quantized to iSize.
- ASSERT_EQUALS( iSize,
- RecordStoreV1Base::quantizePowerOf2AllocationSpace( iSize ) );
-
- // iSize + 1 is quantized to iSize + 1mb.
- ASSERT_EQUALS( iSize + 0x100000,
- RecordStoreV1Base::quantizePowerOf2AllocationSpace( iSize + 1 ) );
- }
- }
-
BSONObj docForRecordSize( int size ) {
BSONObjBuilder b;
b.append( "_id", 5 );
@@ -151,6 +92,19 @@ namespace {
return x;
}
+ class BsonDocWriter : public DocWriter {
+ public:
+ BsonDocWriter(const BSONObj& obj, bool padding) : _obj(obj), _padding(padding) {}
+
+ virtual void writeDocument(char* buf) const { memcpy(buf, _obj.objdata(), _obj.objsize()); }
+ virtual size_t documentSize() const { return _obj.objsize(); }
+ virtual bool addPadding() const { return _padding; }
+
+ private:
+ BSONObj _obj;
+ bool _padding;
+ };
+
/** alloc() quantizes the requested size using quantizeAllocationSpace() rules. */
TEST(SimpleRecordStoreV1, AllocQuantized) {
OperationContextNoop txn;
@@ -165,32 +119,30 @@ namespace {
ASSERT( result.isOK() );
// The length of the allocated record is quantized.
- ASSERT_EQUALS( 320, rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
+ ASSERT_EQUALS( 512 , rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
}
- /**
- * alloc() does not quantize records in index collections using quantizeAllocationSpace()
- * rules.
- */
- TEST(SimpleRecordStoreV1, AllocIndexNamespaceNotQuantized) {
+ /** alloc() quantizes the requested size if DocWriter::addPadding() returns true. */
+ TEST(SimpleRecordStoreV1, AllocQuantizedWithDocWriter) {
OperationContextNoop txn;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- string myns = "test.AllocIndexNamespaceNotQuantized";
- SimpleRecordStoreV1 rs( &txn, myns + "$x", md, &em, false );
+ string myns = "test.AllocQuantized";
+ SimpleRecordStoreV1 rs( &txn, myns, md, &em, false );
- BSONObj obj = docForRecordSize( 300 );
- StatusWith<DiskLoc> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ BsonDocWriter docWriter(docForRecordSize( 300 ), true);
+ StatusWith<DiskLoc> result = rs.insertRecord(&txn, &docWriter, false);
ASSERT( result.isOK() );
- // The length of the allocated record is not quantized.
- ASSERT_EQUALS( 300, rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
-
+ // The length of the allocated record is quantized.
+ ASSERT_EQUALS( 512 , rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
}
- /** alloc() quantizes records in index collections to the nearest multiple of 4. */
- TEST(SimpleRecordStoreV1, AllocIndexNamespaceSlightlyQuantized) {
+ /**
+ * alloc() does not quantize records if DocWriter::addPadding() returns false
+ */
+ TEST(SimpleRecordStoreV1, AllocNonQuantizedDocWriter) {
OperationContextNoop txn;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
@@ -198,46 +150,34 @@ namespace {
string myns = "test.AllocIndexNamespaceNotQuantized";
SimpleRecordStoreV1 rs( &txn, myns + "$x", md, &em, false );
- BSONObj obj = docForRecordSize( 298 );
- StatusWith<DiskLoc> result = rs.insertRecord( &txn, obj.objdata(), obj.objsize(), false);
+ BsonDocWriter docWriter(docForRecordSize( 300 ), false);
+ StatusWith<DiskLoc> result = rs.insertRecord(&txn, &docWriter, false);
ASSERT( result.isOK() );
+ // The length of the allocated record is not quantized.
ASSERT_EQUALS( 300, rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
+
}
- /** alloc() returns a non quantized record larger than the requested size. */
- TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecord) {
+ /** alloc() aligns record sizes up to 4 bytes even if DocWriter::addPadding returns false. */
+ TEST(SimpleRecordStoreV1, AllocAlignedDocWriter) {
OperationContextNoop txn;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
- {
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 310},
- {}
- };
- initializeV1RS(&txn, NULL, drecs, &em, md);
- }
+ string myns = "test.AllocIndexNamespaceNotQuantized";
+ SimpleRecordStoreV1 rs( &txn, myns + "$x", md, &em, false );
- BSONObj obj = docForRecordSize( 300 );
- StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
- ASSERT_OK( actualLocation.getStatus() );
+ BsonDocWriter docWriter(docForRecordSize( 298 ), false);
+ StatusWith<DiskLoc> result = rs.insertRecord(&txn, &docWriter, false);
+ ASSERT( result.isOK() );
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 310},
- {}
- };
- LocAndSize drecs[] = {
- {}
- };
- assertStateV1RS(&txn, recs, drecs, &em, md);
- }
+ ASSERT_EQUALS( 300, rs.dataFor( &txn, result.getValue() ).size() + Record::HeaderSize );
}
-
- /** alloc() returns a non quantized record equal to the requested size. */
- TEST(SimpleRecordStoreV1, AllocExactSizeNonQuantizedDeletedRecord) {
+ /**
+ * alloc() with quantized size doesn't split if enough room left over.
+ */
+ TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithoutSplit) {
OperationContextNoop txn;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
@@ -245,19 +185,19 @@ namespace {
{
LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 300},
+ {DiskLoc(0, 1000), 512 + 31},
{}
};
initializeV1RS(&txn, NULL, drecs, &em, md);
}
- BSONObj obj = docForRecordSize( 300 );
- StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ BsonDocWriter docWriter(docForRecordSize( 300 ), true);
+ StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, &docWriter, false);
ASSERT_OK( actualLocation.getStatus() );
{
LocAndSize recs[] = {
- {DiskLoc(0, 1000), 300},
+ {DiskLoc(0, 1000), 512 + 31},
{}
};
LocAndSize drecs[] = {
@@ -268,10 +208,9 @@ namespace {
}
/**
- * alloc() returns a non quantized record equal to the quantized size plus some extra space
- * too small to make a DeletedRecord.
+ * alloc() with quantized size splits if enough room left over.
*/
- TEST(SimpleRecordStoreV1, AllocQuantizedWithExtra) {
+ TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithSplit) {
OperationContextNoop txn;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
@@ -279,22 +218,23 @@ namespace {
{
LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 343},
+ {DiskLoc(0, 1000), 512 + 32},
{}
};
initializeV1RS(&txn, NULL, drecs, &em, md);
}
- BSONObj obj = docForRecordSize( 300 );
- StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ BsonDocWriter docWriter(docForRecordSize( 300 ), true);
+ StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, &docWriter, false);
ASSERT_OK( actualLocation.getStatus() );
{
LocAndSize recs[] = {
- {DiskLoc(0, 1000), 343},
+ {DiskLoc(0, 1000), 512},
{}
};
LocAndSize drecs[] = {
+ {DiskLoc(0, 1512), 32},
{}
};
assertStateV1RS(&txn, recs, drecs, &em, md);
@@ -302,10 +242,9 @@ namespace {
}
/**
- * alloc() returns a quantized record when the extra space in the reclaimed deleted record
- * is large enough to form a new deleted record.
+ * alloc() with non quantized size doesn't split if enough room left over.
*/
- TEST(SimpleRecordStoreV1, AllocQuantizedWithoutExtra) {
+ TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithoutSplit) {
OperationContextNoop txn;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
@@ -313,26 +252,22 @@ namespace {
{
LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 344},
+ {DiskLoc(0, 1000), 331},
{}
};
initializeV1RS(&txn, NULL, drecs, &em, md);
}
-
- BSONObj obj = docForRecordSize( 300 );
- StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ BsonDocWriter docWriter(docForRecordSize( 300 ), false);
+ StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, &docWriter, false);
ASSERT_OK( actualLocation.getStatus() );
{
LocAndSize recs[] = {
- // The returned record is quantized from 300 to 320.
- {DiskLoc(0, 1000), 320},
+ {DiskLoc(0, 1000), 331},
{}
};
LocAndSize drecs[] = {
- // A new 24 byte deleted record is split off.
- {DiskLoc(0, 1320), 24},
{}
};
assertStateV1RS(&txn, recs, drecs, &em, md);
@@ -340,10 +275,9 @@ namespace {
}
/**
- * A non quantized deleted record within 1/8 of the requested size is returned as is, even
- * if a quantized portion of the deleted record could be used instead.
+ * alloc() with non quantized size splits if enough room left over.
*/
- TEST(SimpleRecordStoreV1, AllocNotQuantizedNearDeletedSize) {
+ TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithSplit) {
OperationContextNoop txn;
DummyExtentManager em;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
@@ -351,87 +285,29 @@ namespace {
{
LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 344},
+ {DiskLoc(0, 1000), 332},
{}
};
initializeV1RS(&txn, NULL, drecs, &em, md);
}
- BSONObj obj = docForRecordSize( 319 );
- StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ BsonDocWriter docWriter(docForRecordSize( 300 ), false);
+ StatusWith<DiskLoc> actualLocation = rs.insertRecord(&txn, &docWriter, false);
ASSERT_OK( actualLocation.getStatus() );
- // Even though 319 would be quantized to 320 and 344 - 320 == 24 could become a new
- // deleted record, the entire deleted record is returned because
- // ( 344 - 320 ) < ( 320 / 8 ).
-
{
LocAndSize recs[] = {
- {DiskLoc(0, 1000), 344},
+ {DiskLoc(0, 1000), 300},
{}
};
LocAndSize drecs[] = {
+ {DiskLoc(0, 1300), 32},
{}
};
assertStateV1RS(&txn, recs, drecs, &em, md);
}
}
- /** getRecordAllocationSize() returns its argument when the padding factor is 1.0. */
- TEST(SimpleRecordStoreV1, GetRecordAllocationSizeNoPadding) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
- ASSERT_EQUALS( 1.0, md->paddingFactor() );
- ASSERT_EQUALS( 300, rs.getRecordAllocationSize( 300 ) );
- }
-
- /** getRecordAllocationSize() multiplies by a padding factor > 1.0. */
- TEST(SimpleRecordStoreV1, GetRecordAllocationSizeWithPadding) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
- double paddingFactor = 1.2;
- md->setPaddingFactor( &txn, paddingFactor );
- ASSERT_EQUALS( paddingFactor, md->paddingFactor() );
- ASSERT_EQUALS( int(300 * paddingFactor), rs.getRecordAllocationSize( 300 ) );
- }
-
- /**
- * getRecordAllocationSize() quantizes to the nearest power of 2 when Flag_UsePowerOf2Sizes
- * is set.
- */
- TEST(SimpleRecordStoreV1, GetRecordAllocationSizePowerOf2) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(
- false,
- RecordStoreV1Base::Flag_UsePowerOf2Sizes );
-
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
- ASSERT_EQUALS( 512, rs.getRecordAllocationSize( 300 ) );
- }
-
- /**
- * getRecordAllocationSize() quantizes to the nearest power of 2 when Flag_UsePowerOf2Sizes
- * is set, ignoring the padding factor.
- */
- TEST(SimpleRecordStoreV1, GetRecordAllocationSizePowerOf2PaddingIgnored) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(
- false,
- RecordStoreV1Base::Flag_UsePowerOf2Sizes );
-
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
- md->setPaddingFactor( &txn, 2.0 );
- ASSERT_EQUALS( 2.0, md->paddingFactor() );
- ASSERT_EQUALS( 512, rs.getRecordAllocationSize( 300 ) );
- }
-
-
// -----------------
TEST( SimpleRecordStoreV1, FullSimple1 ) {
@@ -453,327 +329,6 @@ namespace {
ASSERT_EQUALS( string("abc"), string(recordData.data()) );
}
- // ----------------
-
- /**
- * Inserts take the first deleted record with the correct size.
- */
- TEST( SimpleRecordStoreV1, InsertTakesFirstDeletedWithExactSize ) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 100},
- {DiskLoc(0, 1100), 100},
- {DiskLoc(0, 1300), 100},
- {DiskLoc(2, 1100), 100},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1200), 100}, // this one will be used
- {DiskLoc(2, 1000), 100},
- {DiskLoc(1, 1000), 1000},
- {}
- };
-
- initializeV1RS(&txn, recs, drecs, &em, md);
- }
-
- rs.insertRecord(&txn, zeros, 100 - Record::HeaderSize, false);
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 100},
- {DiskLoc(0, 1100), 100},
- {DiskLoc(0, 1300), 100},
- {DiskLoc(0, 1200), 100}, // this is the new record
- {DiskLoc(2, 1100), 100},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(2, 1000), 100},
- {DiskLoc(1, 1000), 1000},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, &em, md);
- }
- }
-
- /**
- * Test that we keep looking for better matches for 5 links once we find a non-exact match.
- * This "extra" scanning does not proceed into bigger buckets.
- * WARNING: this test depends on magic numbers inside RSV1Simple::_allocFromExistingExtents.
- */
- TEST( SimpleRecordStoreV1, InsertLooksForBetterMatchUpTo5Links ) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
- {
- LocAndSize recs[] = {
- {}
- };
- LocAndSize drecs[] = {
- // This intentionally leaves gaps to keep locs readable.
- {DiskLoc(0, 1000), 75}, // too small
- {DiskLoc(0, 1100), 100}, // 1st big enough: will be first record
- {DiskLoc(0, 1200), 100}, // 2nd: will be third record
- {DiskLoc(0, 1300), 100}, // 3rd
- {DiskLoc(0, 1400), 100}, // 4th
- {DiskLoc(0, 1500), 100}, // 5th: first and third will stop once they look here
- {DiskLoc(0, 1600), 80}, // 6th: second will make it here and use this
- {DiskLoc(0, 1700), 999}, // bigger bucket. Should never look here
- {}
- };
- initializeV1RS(&txn, recs, drecs, &em, md);
- }
-
- rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false);
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1100), 100}, // 1st insert
- {DiskLoc(0, 1600), 80}, // 2nd insert
- {DiskLoc(0, 1200), 100}, // 3rd insert
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 75},
- {DiskLoc(0, 1300), 100},
- {DiskLoc(0, 1400), 100},
- {DiskLoc(0, 1500), 100},
- {DiskLoc(0, 1700), 999},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, &em, md);
- }
- }
-
- /**
- * Test that we stop looking in a bucket once we see 31 too small drecs.
- * WARNING: this test depends on magic numbers inside RSV1Simple::_allocFromExistingExtents.
- */
- TEST( SimpleRecordStoreV1, InsertLooksForMatchUpTo31Links ) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
- {
- LocAndSize recs[] = {
- {}
- };
- LocAndSize drecs[] = {
- // This intentionally leaves gaps to keep locs readable.
- {DiskLoc(0, 1000), 50}, // different bucket
-
- {DiskLoc(0, 1100), 75}, // 1st too small in correct bucket
- {DiskLoc(0, 1200), 75},
- {DiskLoc(0, 1300), 75},
- {DiskLoc(0, 1400), 75},
- {DiskLoc(0, 1500), 75},
- {DiskLoc(0, 1600), 75},
- {DiskLoc(0, 1700), 75},
- {DiskLoc(0, 1800), 75},
- {DiskLoc(0, 1900), 75},
- {DiskLoc(0, 2000), 75}, // 10th too small
- {DiskLoc(0, 2100), 75},
- {DiskLoc(0, 2200), 75},
- {DiskLoc(0, 2300), 75},
- {DiskLoc(0, 2400), 75},
- {DiskLoc(0, 2500), 75},
- {DiskLoc(0, 2600), 75},
- {DiskLoc(0, 2700), 75},
- {DiskLoc(0, 2800), 75},
- {DiskLoc(0, 2900), 75},
- {DiskLoc(0, 3000), 75}, // 20th too small
- {DiskLoc(0, 3100), 75},
- {DiskLoc(0, 3200), 75},
- {DiskLoc(0, 3300), 75},
- {DiskLoc(0, 3400), 75},
- {DiskLoc(0, 3500), 75},
- {DiskLoc(0, 3600), 75},
- {DiskLoc(0, 3700), 75},
- {DiskLoc(0, 3800), 75},
- {DiskLoc(0, 3900), 75},
- {DiskLoc(0, 4000), 75}, // 30th too small
- {DiskLoc(0, 4100), 75}, // 31st too small
-
- {DiskLoc(0, 8000), 80}, // big enough but wont be seen until we take an earlier one
- {DiskLoc(0, 9000), 140}, // bigger bucket. jumps here after seeing 31 drecs
- {}
- };
- initializeV1RS(&txn, recs, drecs, &em, md);
- }
-
- rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false); // takes from bigger bucket
- rs.insertRecord(&txn, zeros, 70 - Record::HeaderSize, false); // removes a 75-sized drec
- rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false); // now sees big-enough drec
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 9000), 80}, // 1st insert went here
- {DiskLoc(0, 1100), 75}, // 2nd here
- {DiskLoc(0, 8000), 80}, // 3rd here
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 9000 + 80), 140 - 80}, // split off during first insert
- {DiskLoc(0, 1000), 50},
- {DiskLoc(0, 1200), 75},
- {DiskLoc(0, 1300), 75},
- {DiskLoc(0, 1400), 75},
- {DiskLoc(0, 1500), 75},
- {DiskLoc(0, 1600), 75},
- {DiskLoc(0, 1700), 75},
- {DiskLoc(0, 1800), 75},
- {DiskLoc(0, 1900), 75},
- {DiskLoc(0, 2000), 75},
- {DiskLoc(0, 2100), 75},
- {DiskLoc(0, 2200), 75},
- {DiskLoc(0, 2300), 75},
- {DiskLoc(0, 2400), 75},
- {DiskLoc(0, 2500), 75},
- {DiskLoc(0, 2600), 75},
- {DiskLoc(0, 2700), 75},
- {DiskLoc(0, 2800), 75},
- {DiskLoc(0, 2900), 75},
- {DiskLoc(0, 3000), 75},
- {DiskLoc(0, 3100), 75},
- {DiskLoc(0, 3200), 75},
- {DiskLoc(0, 3300), 75},
- {DiskLoc(0, 3400), 75},
- {DiskLoc(0, 3500), 75},
- {DiskLoc(0, 3600), 75},
- {DiskLoc(0, 3700), 75},
- {DiskLoc(0, 3800), 75},
- {DiskLoc(0, 3900), 75},
- {DiskLoc(0, 4000), 75},
- {DiskLoc(0, 4100), 75},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, &em, md);
- }
- }
-
- /**
- * Test that we stop looking in a bucket once we see 31 drecs, or look 4-past the first
- * too-large match, whichever comes first. This is a combination of
- * InsertLooksForBetterMatchUpTo5Links and InsertLooksForMatchUpTo31Links.
- *
- * WARNING: this test depends on magic numbers inside RSV1Simple::_allocFromExistingExtents.
- */
- TEST( SimpleRecordStoreV1, InsertLooksForMatchUpTo31LinksEvenIfFoundOversizedFit ) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
- {
- LocAndSize recs[] = {
- {}
- };
- LocAndSize drecs[] = {
- // This intentionally leaves gaps to keep locs readable.
- {DiskLoc(0, 1000), 50}, // different bucket
-
- {DiskLoc(0, 1100), 75}, // 1st too small in correct bucket
- {DiskLoc(0, 1200), 75},
- {DiskLoc(0, 1300), 75},
- {DiskLoc(0, 1400), 75},
- {DiskLoc(0, 1500), 75},
- {DiskLoc(0, 1600), 75},
- {DiskLoc(0, 1700), 75},
- {DiskLoc(0, 1800), 75},
- {DiskLoc(0, 1900), 75},
- {DiskLoc(0, 2000), 75}, // 10th too small
- {DiskLoc(0, 2100), 75},
- {DiskLoc(0, 2200), 75},
- {DiskLoc(0, 2300), 75},
- {DiskLoc(0, 2400), 75},
- {DiskLoc(0, 2500), 75},
- {DiskLoc(0, 2600), 75},
- {DiskLoc(0, 2700), 75},
- {DiskLoc(0, 2800), 75},
- {DiskLoc(0, 2900), 75},
- {DiskLoc(0, 3000), 75}, // 20th too small
- {DiskLoc(0, 3100), 75},
- {DiskLoc(0, 3200), 75},
- {DiskLoc(0, 3300), 75},
- {DiskLoc(0, 3400), 75},
- {DiskLoc(0, 3500), 75},
- {DiskLoc(0, 3600), 75},
- {DiskLoc(0, 3700), 75}, // 27th too small
-
- {DiskLoc(0, 7000), 95}, // 1st insert takes this
- {DiskLoc(0, 7100), 95}, // 3rd insert takes this
-
- {DiskLoc(0, 3800), 75},
- {DiskLoc(0, 3900), 75}, // 29th too small (31st overall)
-
- {DiskLoc(0, 8000), 80}, // exact match. taken by 2nd insert
-
- {DiskLoc(0, 9000), 140}, // bigger bucket. Should never get here
- {}
- };
- initializeV1RS(&txn, recs, drecs, &em, md);
- }
-
- rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 80 - Record::HeaderSize, false);
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 7000), 95}, // 1st insert went here
- {DiskLoc(0, 8000), 80}, // 2nd here
- {DiskLoc(0, 7100), 95}, // 3rd here
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 50},
- {DiskLoc(0, 1100), 75},
- {DiskLoc(0, 1200), 75},
- {DiskLoc(0, 1300), 75},
- {DiskLoc(0, 1400), 75},
- {DiskLoc(0, 1500), 75},
- {DiskLoc(0, 1600), 75},
- {DiskLoc(0, 1700), 75},
- {DiskLoc(0, 1800), 75},
- {DiskLoc(0, 1900), 75},
- {DiskLoc(0, 2000), 75},
- {DiskLoc(0, 2100), 75},
- {DiskLoc(0, 2200), 75},
- {DiskLoc(0, 2300), 75},
- {DiskLoc(0, 2400), 75},
- {DiskLoc(0, 2500), 75},
- {DiskLoc(0, 2600), 75},
- {DiskLoc(0, 2700), 75},
- {DiskLoc(0, 2800), 75},
- {DiskLoc(0, 2900), 75},
- {DiskLoc(0, 3000), 75},
- {DiskLoc(0, 3100), 75},
- {DiskLoc(0, 3200), 75},
- {DiskLoc(0, 3300), 75},
- {DiskLoc(0, 3400), 75},
- {DiskLoc(0, 3500), 75},
- {DiskLoc(0, 3600), 75},
- {DiskLoc(0, 3700), 75},
- {DiskLoc(0, 3800), 75},
- {DiskLoc(0, 3900), 75},
- {DiskLoc(0, 9000), 140},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, &em, md);
- }
- }
-
// -----------------
TEST( SimpleRecordStoreV1, Truncate ) {
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
index ec19da43b36..50a34ac040d 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
@@ -188,15 +188,6 @@ namespace mongo {
return _maxCappedDocs;
}
- double DummyRecordStoreV1MetaData::paddingFactor() const {
- return _paddingFactor;
- }
-
- void DummyRecordStoreV1MetaData::setPaddingFactor( OperationContext* txn,
- double paddingFactor ) {
- _paddingFactor = paddingFactor;
- }
-
// -----------------------------------------
DummyExtentManager::~DummyExtentManager() {
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
index a04f9b40331..352c91efd16 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
@@ -85,10 +85,6 @@ namespace mongo {
virtual long long maxCappedDocs() const;
- virtual double paddingFactor() const;
-
- virtual void setPaddingFactor( OperationContext* txn, double paddingFactor );
-
protected:
DiskLoc _capExtent;