author     Dan Pasette <dan@10gen.com>      2014-01-06 23:53:21 -0500
committer  Dan Pasette <dan@mongodb.com>    2014-01-21 10:09:57 -0500
commit     5402118336e74b96ea50002e43dd393d219866ec (patch)
tree       092083a99bcefeec7fb7cb1534f4a1d91fbe8b61
parent     df45324b26d66f8a5155a0a9799b0ba000544ebb (diff)
download   mongo-5402118336e74b96ea50002e43dd393d219866ec.tar.gz
SERVER-12088 Enable usePowerOf2Sizes as server-wide default
-rw-r--r--  jstests/auth/user_management_commands.js  | 16
-rw-r--r--  jstests/collmod.js                         | 17
-rw-r--r--  jstests/compactPreservePadding.js          | 22
-rw-r--r--  jstests/datasize.js                        |  7
-rw-r--r--  jstests/fts1.js                            |  6
-rw-r--r--  jstests/padding.js                         |  4
-rw-r--r--  jstests/profile3.js                        |  1
-rw-r--r--  jstests/sharding/authCommands.js           |  2
-rw-r--r--  jstests/sharding/localhostAuthBypass.js    |  4
-rw-r--r--  jstests/sharding/migrateBig.js             |  4
-rw-r--r--  jstests/sharding/shard_existing.js         |  5
-rw-r--r--  jstests/sharding/user_flags_sharded.js     | 10
-rw-r--r--  jstests/slowNightly/ttl_repl.js            |  5
-rw-r--r--  jstests/splitvector.js                     | 45
-rw-r--r--  jstests/tool/dumprestore_auth.js           |  7
-rw-r--r--  src/mongo/db/catalog/database.cpp          | 22
-rw-r--r--  src/mongo/db/pdfile.cpp                    |  4
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp       |  5
-rw-r--r--  src/mongo/shell/db.js                      |  3
19 files changed, 126 insertions(+), 63 deletions(-)
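
For orientation, a minimal mongo-shell sketch of the behavior this commit introduces; the database and collection names are illustrative, not taken from the patch:

// New collections now default to the powerOf2Sizes allocation strategy.
db.createCollection("example");
assert.eq(1, db.example.stats().userFlags);

// The server-wide default is controlled by the new server parameter.
db.adminCommand({setParameter: 1, newCollectionsUsePowerOf2Sizes: false});

// A collection can still opt out explicitly at creation time.
db.createCollection("legacy_example", {usePowerOf2Sizes: false});
assert.eq(0, db.legacy_example.stats().userFlags);
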
diff --git a/jstests/auth/user_management_commands.js b/jstests/auth/user_management_commands.js
index 931feae7e69..5f94f4aa217 100644
--- a/jstests/auth/user_management_commands.js
+++ b/jstests/auth/user_management_commands.js
@@ -145,6 +145,7 @@ function runTest(conn) {
jsTestLog("Testing usersInfo");
var res = testUserAdmin.runCommand({usersInfo: 'spencer'});
+ printjson(res);
assert.eq(1, res.users.length);
assert.eq(10036, res.users[0].customData.zipCode);
@@ -153,9 +154,19 @@ function runTest(conn) {
assert.eq(10036, res.users[0].customData.zipCode);
res = testUserAdmin.runCommand({usersInfo: ['spencer', {user: 'userAdmin', db: 'admin'}]});
+ printjson(res);
assert.eq(2, res.users.length);
- assert.eq(10036, res.users[0].customData.zipCode);
- assert(res.users[1].customData.userAdmin);
+ if (res.users[0].user == "spencer") {
+ assert.eq(10036, res.users[0].customData.zipCode);
+ assert(res.users[1].customData.userAdmin);
+ } else if (res.users[0].user == "userAdmin") {
+ assert.eq(10036, res.users[1].customData.zipCode);
+ assert(res.users[0].customData.userAdmin);
+ } else {
+ doassert("Expected user names returned by usersInfo to be either 'userAdmin' or 'spencer', "
+ + "but got: " + res.users[0].user);
+ }
+
res = testUserAdmin.runCommand({usersInfo: 1});
assert.eq(2, res.users.length);
@@ -177,6 +188,7 @@ function runTest(conn) {
jsTest.log('Test standalone');
var conn = MongoRunner.runMongod({ auth: '' });
+conn.getDB('admin').runCommand({setParameter:1, newCollectionsUsePowerOf2Sizes: false});
runTest(conn);
MongoRunner.stopMongod(conn.port);
diff --git a/jstests/collmod.js b/jstests/collmod.js
index 198c4bf8c02..2dc5555f3ec 100644
--- a/jstests/collmod.js
+++ b/jstests/collmod.js
@@ -12,17 +12,18 @@ t.drop();
db.createCollection( coll );
-// Verify the new collection has userFlags set to 0
-assert.eq( t.stats().userFlags , 0 , "fresh collection doesn't have userFlags = 0 ");
+// Verify the new collection has userFlags set to 1
+printjson(t.stats());
+assert.eq( t.stats().userFlags , 1 , "fresh collection doesn't have userFlags = 1 ");
-// Modify the collection with the usePowerOf2Sizes flag. Verify userFlags now = 1.
-var res = db.runCommand( { "collMod" : coll, "usePowerOf2Sizes" : true } );
+// Modify the collection with the usePowerOf2Sizes flag. Verify userFlags now = 0.
+var res = db.runCommand( { "collMod" : coll, "usePowerOf2Sizes" : false } );
debug( res );
assert.eq( res.ok , 1 , "collMod failed" );
-assert.eq( t.stats().userFlags , 1 , "modified collection should have userFlags = 1 ");
+assert.eq( t.stats().userFlags , 0 , "modified collection should have userFlags = 0 ");
var nso = db.system.namespaces.findOne( { name : t.getFullName() } );
debug( nso );
-assert.eq( 1, nso.options.flags, "options didn't sync to system.namespaces: " + tojson( nso ) );
+assert.eq( 0, nso.options.flags, "options didn't sync to system.namespaces: " + tojson( nso ) );
// Try to modify it with some unrecognized value
var res = db.runCommand( { "collMod" : coll, "unrecognized" : true } );
@@ -71,11 +72,11 @@ assert.eq( 0, res.ok, "shouldn't be able to modify faulty index spec" );
t.dropIndex( {a : 1 } );
t.ensureIndex( {a : 1} , { "expireAfterSeconds": 50 } )
var res = db.runCommand( { "collMod" : coll ,
- "usePowerOf2Sizes" : false,
+ "usePowerOf2Sizes" : true,
"index" : { "keyPattern" : {a : 1} , "expireAfterSeconds" : 100 } } );
debug( res );
assert.eq( 1, res.ok, "should be able to modify both userFlags and expireAfterSeconds" );
-assert.eq( t.stats().userFlags , 0 , "userflags should be 0 now");
+assert.eq( t.stats().userFlags , 1 , "userflags should be 1 now");
assert.eq( 1, db.system.indexes.count( { key : {a:1}, expireAfterSeconds : 100 } ),
"TTL index should be 100 now" );
diff --git a/jstests/compactPreservePadding.js b/jstests/compactPreservePadding.js
index dd0c6ddf8cb..5a4ac949f49 100644
--- a/jstests/compactPreservePadding.js
+++ b/jstests/compactPreservePadding.js
@@ -1,24 +1,24 @@
// test preservePadding
-collName = "compactPreservePadding";
-mydb = db.getSisterDB("compactPreservePaddingDB");
-mydb.dropDatabase();
-mydb.createCollection(collName);
-// ensure there is some padding by using power of 2 sizes
-mydb.runCommand({collMod: collName, usePowerOf2Sizes: true});
-t = mydb.compactPreservePadding;
+
+var collName = "compactPreservePadding";
+var t = db.getCollection(collName);
t.drop();
-// populate db
+
+// use larger keyname to avoid hitting an edge case with extents
for (i = 0; i < 10000; i++) {
- t.insert({x:i});
+ t.insert({useLargerKeyName:i});
}
+
// remove half the entries
-t.remove({x:{$mod:[2,0]}})
+t.remove({useLargerKeyName:{$mod:[2,0]}})
printjson(t.stats());
originalSize = t.stats().size;
originalStorage = t.stats().storageSize;
+
// compact!
-mydb.runCommand({compact: collName, preservePadding: true});
+db.runCommand({compact: collName, preservePadding: true});
printjson(t.stats());
+
// object sizes ('size') should be the same (unless we hit an edge case involving extents, which
// this test doesn't) and storage size should shrink
assert(originalSize == t.stats().size);
diff --git a/jstests/datasize.js b/jstests/datasize.js
index 277efac4bdf..13e9f11bf0c 100644
--- a/jstests/datasize.js
+++ b/jstests/datasize.js
@@ -1,6 +1,11 @@
+// test end-to-end data allocation without powerOf2Sizes enabled
f = db.jstests_datasize;
f.drop();
+// this test requires usePowerOf2Sizes to be off
+db.createCollection( f.getName(), { usePowerOf2Sizes: false } );
+assert.eq(0, f.stats().userFlags);
+
assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.save( {qq:'c'} );
assert.eq( 32, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
@@ -8,6 +13,8 @@ f.save( {qq:'fg'} );
assert.eq( 68, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.drop();
+db.createCollection( f.getName(), { usePowerOf2Sizes: false} );
+
f.ensureIndex( {qq:1} );
assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.save( {qq:'c'} );
diff --git a/jstests/fts1.js b/jstests/fts1.js
index 6af3f133694..6bd138d6c25 100644
--- a/jstests/fts1.js
+++ b/jstests/fts1.js
@@ -1,9 +1,12 @@
-
load( "jstests/libs/fts.js" );
t = db.text1;
t.drop();
+// this test requires usePowerOf2Sizes to be off
+db.createCollection( t.getName(), {"usePowerOf2Sizes" : false } );
+assert.eq(0, t.stats().userFlags);
+
assert.eq( [] , queryIDS( t , "az" ) , "A0" );
t.save( { _id : 1 , x : "az b c" } );
@@ -24,4 +27,3 @@ assert.eq( [4] , queryIDS( t , "d" ) , "A2" );
idx = db.system.indexes.findOne( { ns: t.getFullName(), "weights.x" : 1 } )
assert( idx.v >= 1, tojson( idx ) )
assert( idx.textIndexVersion >= 1, tojson( idx ) )
-
diff --git a/jstests/padding.js b/jstests/padding.js
index af68e7212ed..1872574d80f 100644
--- a/jstests/padding.js
+++ b/jstests/padding.js
@@ -1,6 +1,10 @@
p = db.getCollection("padding");
p.drop();
+// this test requires usePowerOf2Sizes to be off
+db.createCollection( p.getName(), { "usePowerOf2Sizes" : false } );
+assert.eq(0, p.stats().userFlags);
+
for (var i = 0; i < 1000; i++) {
p.insert({ x: 1, y: "aaaaaaaaaaaaaaa" });
}
diff --git a/jstests/profile3.js b/jstests/profile3.js
index a7072be235c..6091cdafdb3 100644
--- a/jstests/profile3.js
+++ b/jstests/profile3.js
@@ -26,6 +26,7 @@ try {
db.setProfilingLevel(2);
+ db.createCollection(t.getName(), {usePowerOf2Sizes: false});
t.insert( { x : 1 } );
t.findOne( { x : 1 } );
t.find( { x : 1 } ).count();
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index 80c18400f46..93d178222f5 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -4,7 +4,7 @@
var doTest = function() {
var rsOpts = { oplogSize: 10, verbose : 2, useHostname : false };
-var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunksize : 1, config : 3,
+var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunksize : 2, config : 3,
rs : rsOpts, other : { nopreallocj : 1, verbose : 2, useHostname : false }});
var mongos = st.s;
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 82d8b43a474..28f3a963239 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -126,7 +126,7 @@ var setupSharding = function(mongo) {
mongo.getDB("admin").runCommand({shardCollection : "test.foo", key : {_id : 1}});
var test = mongo.getDB("test");
- for(i = 1; i < 40; i++) {
+ for(i = 1; i < 20; i++) {
test.foo.insert({_id: i});
}
};
@@ -228,4 +228,4 @@ var runTest = function(useHostName) {
}
runTest(false);
-runTest(true);
\ No newline at end of file
+runTest(true);
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 853a671915a..2c4d156d760 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -15,7 +15,9 @@ for ( x=0; x<100; x++ )
coll.insert( { x : x , big : big } )
db.getLastError();
-s.adminCommand( { split : "test.foo" , middle : { x : 33 } } )
+db.printShardingStatus()
+
+s.adminCommand( { split : "test.foo" , middle : { x : 30 } } )
s.adminCommand( { split : "test.foo" , middle : { x : 66 } } )
s.adminCommand( { movechunk : "test.foo" , find : { x : 90 } , to : s.getOther( s.getServer( "test" ) ).name } )
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index 51bd43f470a..b16c1796cd9 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -1,4 +1,3 @@
-
s = new ShardingTest( "shard_existing" , 2 /* numShards */, 1 /* verboseLevel */, 1 /* numMongos */, { chunksize : 1 } )
db = s.getDB( "test" )
@@ -6,12 +5,16 @@ db = s.getDB( "test" )
var stringSize = 10000;
var numDocs = 2000;
+
// we want a lot of data, so lets make a string to cheat :)
var bigString = new Array(stringSize).toString();
var docSize = Object.bsonsize({ _id: numDocs, s: bigString });
var totalSize = docSize * numDocs;
print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSize);
+// turn off powerOf2Sizes as this tests regular allocation
+db.createCollection('data', {usePowerOf2Sizes: false});
+
for (i=0; i<numDocs; i++) {
db.data.insert({_id: i, s: bigString});
}
diff --git a/jstests/sharding/user_flags_sharded.js b/jstests/sharding/user_flags_sharded.js
index e67a062f71f..6378b53e85d 100644
--- a/jstests/sharding/user_flags_sharded.js
+++ b/jstests/sharding/user_flags_sharded.js
@@ -18,10 +18,10 @@ db1.createCollection( coll );
var collstats = db1.getCollection( coll ).stats()
print( "*************** Fresh Collection Stats ************" );
printjson( collstats );
-assert.eq( collstats.userFlags , 0 , "fresh collection doesn't have userFlags = 0 ");
+assert.eq( collstats.userFlags , 1 , "fresh collection doesn't have userFlags = 1 ");
// Now we modify the collection with the usePowerOf2Sizes flag
-var res = db1.runCommand( { "collMod" : coll , "usePowerOf2Sizes" : true } );
+var res = db1.runCommand( { "collMod" : coll , "usePowerOf2Sizes" : false } );
assert.eq( res.ok , 1 , "collMod failed" );
// and insert some stuff, for the hell of it
@@ -29,11 +29,11 @@ var numdocs = 20;
for( i=0; i < numdocs; i++){ db1.getCollection( coll ).insert( {_id : i} ); }
db1.getLastError()
-// Next verify that userFlags has changed to 1
+// Next verify that userFlags has changed to 0
collstats = db1.getCollection( coll ).stats()
print( "*************** Collection Stats After CollMod ************" );
printjson( collstats );
-assert.eq( collstats.userFlags , 1 , "modified collection should have userFlags = 1 ");
+assert.eq( collstats.userFlags , 0 , "modified collection should have userFlags = 0 ");
// start up a new sharded cluster, and add previous mongod
var s = new ShardingTest( "user_flags", 1 );
@@ -53,7 +53,7 @@ shard2stats = shard2.getCollection( coll ).stats()
printjson( shard2stats );
assert.eq( shard2stats.count , numdocs , "moveChunk didn't succeed" );
-assert.eq( shard2stats.userFlags , 1 , "new shard should also have userFlags = 1 ");
+assert.eq( shard2stats.userFlags , 0 , "new shard should also have userFlags = 0 ");
stopMongod( 29000 );
s.stop();
diff --git a/jstests/slowNightly/ttl_repl.js b/jstests/slowNightly/ttl_repl.js
index 1f41d3d36ea..3b251dfa8a9 100644
--- a/jstests/slowNightly/ttl_repl.js
+++ b/jstests/slowNightly/ttl_repl.js
@@ -23,8 +23,11 @@ var slave1db = slave1.getDB( 'd' );
var mastercol = masterdb[ 'c' ];
var slave1col = slave1db[ 'c' ];
-// create new collection. insert 24 docs, aged at one-hour intervalss
+// turn off usePowerOf2Sizes as this tests the flag is set automatically
mastercol.drop();
+masterdb.createCollection(mastercol.getName(), {usePowerOf2Sizes: false});
+
+// create new collection. insert 24 docs, aged at one-hour intervalss
now = (new Date()).getTime();
for ( i=0; i<24; i++ )
mastercol.insert( { x : new Date( now - ( 3600 * 1000 * i ) ) } );
diff --git a/jstests/splitvector.js b/jstests/splitvector.js
index b8f293582a7..d239625de67 100644
--- a/jstests/splitvector.js
+++ b/jstests/splitvector.js
@@ -45,12 +45,17 @@ var assertFieldNamesMatch = function( splitPoint , keyPattern ){
}
}
+// turn off powerOf2, this test checks regular allocation
+var resetCollection = function() {
+ f.drop();
+ db.createCollection(f.getName(), {usePowerOf2Sizes: false});
+}
+
// -------------------------
// TESTS START HERE
// -------------------------
-
f = db.jstests_splitvector;
-f.drop();
+resetCollection();
// -------------------------
// Case 1: missing parameters
@@ -75,7 +80,7 @@ assert.eq( [], db.runCommand( { splitVector: "test.jstests_splitvector" , keyPat
// -------------------------
// Case 4: uniform collection
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1 } );
var case4 = function() {
@@ -109,7 +114,7 @@ case4();
// -------------------------
// Case 5: limit number of split points
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1 } );
var case5 = function() {
@@ -132,7 +137,7 @@ case5();
// -------------------------
// Case 6: limit number of objects in a chunk
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1 } );
var case6 = function() {
@@ -156,7 +161,7 @@ case6();
// Case 7: enough occurances of min key documents to pass the chunk limit
// [1111111111111111,2,3)
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1 } );
var case7 = function() {
@@ -184,7 +189,7 @@ case7();
// Case 8: few occurrances of min key, and enough of some other that we cannot split it
// [1, 22222222222222, 3)
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1 } );
var case8 = function() {
@@ -218,7 +223,7 @@ case8();
// Case 9: splitVector "force" mode, where we split (possible small) chunks in the middle
//
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1 } );
var case9 = function() {
@@ -253,51 +258,51 @@ case9();
// Repeat all cases using prefix shard key.
//
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1, y: 1 } );
case4();
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1, y: -1 , z : 1 } );
case4();
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1, y: 1 } );
case5();
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1, y: -1 , z : 1 } );
case5();
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1, y: 1 } );
case6();
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1, y: -1 , z : 1 } );
case6();
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1, y: 1 } );
case7();
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1, y: -1 , z : 1 } );
case7();
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1, y: 1 } );
case8();
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1, y: -1 , z : 1 } );
case8();
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1, y: 1 } );
case9();
-f.drop();
+resetCollection();
f.ensureIndex( { x: 1, y: -1 , z : 1 } );
case9();
diff --git a/jstests/tool/dumprestore_auth.js b/jstests/tool/dumprestore_auth.js
index 9349681129e..55f18b018db 100644
--- a/jstests/tool/dumprestore_auth.js
+++ b/jstests/tool/dumprestore_auth.js
@@ -13,8 +13,9 @@ adminDB.createUser({user: 'restore', pwd: 'password', roles: ['restore']});
assert.eq(0 , c.count() , "setup1");
c.save({ a : 22 });
assert.eq(1 , c.count() , "setup2");
-assert.commandWorked(c.runCommand("collMod", {usePowerOf2Sizes: true}));
-assert.eq(1, c.getDB().system.namespaces.findOne({name: c.getFullName()}).options.flags);
+
+assert.commandWorked(c.runCommand("collMod", {usePowerOf2Sizes: false}));
+assert.eq(0, c.getDB().system.namespaces.findOne({name: c.getFullName()}).options.flags);
t.runTool("dump" , "--out" , t.ext, "--username", "backup", "--password", "password");
@@ -28,7 +29,7 @@ t.runTool("restore" , "--dir" , t.ext, "--username", "restore", "--password", "p
assert.soon("c.findOne()" , "no data after sleep");
assert.eq(1 , c.count() , "after restore 2");
assert.eq(22 , c.findOne().a , "after restore 2");
-assert.eq(1, c.getDB().system.namespaces.findOne({name: c.getFullName()}).options.flags);
+assert.eq(0, c.getDB().system.namespaces.findOne({name: c.getFullName()}).options.flags);
assert.eq(3, adminDB.system.users.count());
t.stop();
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 8c2fe0e1f2c..74c5bccbcbc 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -47,11 +47,14 @@
#include "mongo/db/pdfile.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/ops/delete.h"
+#include "mongo/db/server_parameters.h"
#include "mongo/db/storage_options.h"
#include "mongo/db/catalog/collection.h"
namespace mongo {
+ MONGO_EXPORT_SERVER_PARAMETER(newCollectionsUsePowerOf2Sizes, bool, true);
+
void massertNamespaceNotIndex( const StringData& ns, const StringData& caller ) {
massert( 17320,
str::stream() << "cannot do " << caller
@@ -555,6 +558,7 @@ namespace mongo {
return c;
}
+
Collection* Database::createCollection( const StringData& ns, bool capped,
const BSONObj* options, bool allocateDefaultSpace ) {
verify( _namespaceIndex.details( ns ) == NULL );
@@ -569,6 +573,20 @@ namespace mongo {
audit::logCreateCollection( currentClient.get(), ns );
+ // allocation strategy set explicitly in flags or by server-wide default
+ // need to check validity before creating the collection
+ int userFlags = 0;
+ bool flagSet = false;
+
+ if ( options && options->getField("flags").type() ) {
+ uassert( 17351, "flags must be a number", options->getField("flags").isNumber() );
+ userFlags = options->getField("flags").numberInt();
+ flagSet = true;
+ }
+ if ( newCollectionsUsePowerOf2Sizes && !flagSet && !capped ) {
+ userFlags = NamespaceDetails::Flag_UsePowerOf2Sizes;
+ }
+
_namespaceIndex.add_ns( ns, DiskLoc(), capped );
_addNamespaceToCatalog( ns, options );
@@ -584,6 +602,9 @@ namespace mongo {
Collection* collection = getCollection( ns );
verify( collection );
+ NamespaceDetails* nsd = collection->details();
+ nsd->setUserFlag( userFlags );
+
if ( allocateDefaultSpace ) {
collection->increaseStorageSize( Extent::initialSize( 128 ), false );
}
@@ -599,6 +620,7 @@ namespace mongo {
}
}
+
return collection;
}
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 3ab86037cc8..7b142cf64ab 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -277,10 +277,6 @@ namespace mongo {
if ( mx > 0 )
d->setMaxCappedDocs( mx );
- if ( options["flags"].numberInt() ) {
- d->replaceUserFlags( options["flags"].numberInt() );
- }
-
return true;
}
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 9aa68ba6e67..6658b5329cd 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -1358,6 +1358,7 @@ namespace NamespaceTests {
public:
void run() {
create();
+ ASSERT( nsd()->clearUserFlag( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
ASSERT_EQUALS( 1.0, nsd()->paddingFactor() );
ASSERT_EQUALS( 300, nsd()->getRecordAllocationSize( 300 ) );
}
@@ -1371,6 +1372,7 @@ namespace NamespaceTests {
create();
double paddingFactor = 1.2;
nsd()->setPaddingFactor( paddingFactor );
+ ASSERT( nsd()->clearUserFlag( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
ASSERT_EQUALS( paddingFactor, nsd()->paddingFactor() );
ASSERT_EQUALS( static_cast<int>( 300 * paddingFactor ),
nsd()->getRecordAllocationSize( 300 ) );
@@ -1386,7 +1388,6 @@ namespace NamespaceTests {
public:
void run() {
create();
- ASSERT( nsd()->setUserFlag( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
ASSERT( nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
ASSERT_EQUALS( 512, nsd()->getRecordAllocationSize( 300 ) );
}
@@ -1402,7 +1403,6 @@ namespace NamespaceTests {
public:
void run() {
create();
- ASSERT( nsd()->setUserFlag( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
ASSERT( nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
nsd()->setPaddingFactor( 2.0 );
ASSERT_EQUALS( 2.0, nsd()->paddingFactor() );
@@ -1429,6 +1429,7 @@ namespace NamespaceTests {
public:
void run() {
create();
+ ASSERT( !nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
DiskLoc loc = nsd()->alloc( ns(), 300 );
ASSERT_EQUALS( 300, loc.rec()->lengthWithHeaders() );
}
diff --git a/src/mongo/shell/db.js b/src/mongo/shell/db.js
index db0c4c27386..55bf47d7891 100644
--- a/src/mongo/shell/db.js
+++ b/src/mongo/shell/db.js
@@ -80,6 +80,7 @@ DB.prototype._adminCommand = DB.prototype.adminCommand; // alias old name
capped: if true, this is a capped collection (where old data rolls out).
</li>
<li> max: maximum number of objects if capped (optional).</li>
+ <li> usePowerOf2Sizes: if true, set usePowerOf2Sizes allocation for the collection.</li>
</ul>
<p>Example:</p>
@@ -100,6 +101,8 @@ DB.prototype.createCollection = function(name, opt) {
cmd.capped = options.capped;
if (options.size != undefined)
cmd.size = options.size;
+ if (options.usePowerOf2Sizes != undefined)
+ cmd.flags = options.usePowerOf2Sizes ? 1 : 0;
var res = this._dbCommand(cmd);
return res;
}
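
A short illustration of the new shell option (collection name illustrative): the shell translates usePowerOf2Sizes into the numeric flags field of the create command.

db.createCollection("example", {usePowerOf2Sizes: false});
// ...is roughly equivalent to issuing:
db.runCommand({create: "example", flags: 0});
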