author    Eliot Horowitz <eliot@10gen.com>  2014-02-18 21:57:01 -0500
committer Eliot Horowitz <eliot@10gen.com>  2014-02-18 23:11:00 -0500
commit    6718e33f5ecd0de4a8550afaf789c2fc416b6eee (patch)
tree      64385a447c8792b16f6c190dd629afebd1ff0f4b
parent    9a9baaaf78677b939b8267d6e9266ec88c345b6b (diff)
download  mongo-6718e33f5ecd0de4a8550afaf789c2fc416b6eee.tar.gz
SERVER-8412: re-write repairDatabase to use new constructs
to make it cleaner; also fix Database::createCollection
-rw-r--r--  jstests/repair.js  14
-rw-r--r--  jstests/tool/dumprestoreWithNoOptions.js  12
-rw-r--r--  jstests/tool/dumprestore_auth2.js  3
-rw-r--r--  src/mongo/SConscript  1
-rw-r--r--  src/mongo/base/error_codes.err  1
-rw-r--r--  src/mongo/db/auth/auth_index_d.cpp  8
-rw-r--r--  src/mongo/db/auth/auth_index_d.h  5
-rw-r--r--  src/mongo/db/catalog/collection.cpp  29
-rw-r--r--  src/mongo/db/catalog/collection.h  8
-rw-r--r--  src/mongo/db/catalog/database.cpp  195
-rw-r--r--  src/mongo/db/catalog/database.h  51
-rw-r--r--  src/mongo/db/catalog/index_create.cpp  70
-rw-r--r--  src/mongo/db/catalog/index_create.h  40
-rw-r--r--  src/mongo/db/cloner.cpp  6
-rw-r--r--  src/mongo/db/cloner.h  1
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp  2
-rw-r--r--  src/mongo/db/commands/mr.cpp  19
-rw-r--r--  src/mongo/db/commands/rename_collection.cpp  7
-rw-r--r--  src/mongo/db/db.cpp  16
-rw-r--r--  src/mongo/db/dbcommands.cpp  7
-rw-r--r--  src/mongo/db/dbhelpers.cpp  7
-rw-r--r--  src/mongo/db/dbhelpers.h  4
-rw-r--r--  src/mongo/db/durop.cpp  5
-rw-r--r--  src/mongo/db/pdfile.cpp  415
-rw-r--r--  src/mongo/db/pdfile.h  6
-rw-r--r--  src/mongo/db/repair_database.cpp  428
-rw-r--r--  src/mongo/db/repair_database.h  52
-rw-r--r--  src/mongo/db/structure/collection_compact.cpp  59
-rw-r--r--  src/mongo/dbtests/counttests.cpp  2
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp  12
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp  1
-rw-r--r--  src/mongo/dbtests/oplogstarttests.cpp  2
-rw-r--r--  src/mongo/dbtests/pdfiletests.cpp  26
-rw-r--r--  src/mongo/dbtests/querytests.cpp  5
-rw-r--r--  src/mongo/dbtests/repltests.cpp  2
-rw-r--r--  src/mongo/s/d_migrate.cpp  5
36 files changed, 978 insertions(+), 548 deletions(-)
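
In outline: Database::createCollection now takes a parsed CollectionOptions struct instead of a raw (capped, BSONObj*) pair, a new MultiIndexBlock drives bulk index builds, and repairDatabase moves out of pdfile.cpp into its own repair_database.cpp and returns a Status instead of a bool/errmsg pair. A minimal sketch of the new creation call, mirroring the mr.cpp hunk below (the namespace string and the db pointer being in scope are placeholders, not part of this diff):

    // Sketch only: create a temporary collection with no _id index via the new API.
    CollectionOptions options;
    options.setNoIdIndex();   // autoIndexId = NO
    options.temp = true;
    Collection* coll = db->createCollection( "test.sketch", options );
    invariant( coll );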
diff --git a/jstests/repair.js b/jstests/repair.js
index df06a49fa8c..445146309bc 100644
--- a/jstests/repair.js
+++ b/jstests/repair.js
@@ -1,7 +1,21 @@
mydb = db.getSisterDB( "repair_test1" )
+
t = mydb.jstests_repair;
t.drop();
+
t.save( { i:1 } );
+doc = t.findOne();
+t.ensureIndex( { i : 1 } );
+assert.eq( 2, t.getIndexes().length );
+ex = t.find( { i : 1 } ).explain();
+
assert.commandWorked( mydb.repairDatabase() );
+
v = t.validate();
assert( v.valid , "not valid! " + tojson( v ) );
+
+assert.eq( 1, t.count() );
+assert.eq( doc, t.findOne() );
+
+assert.eq( 2, t.getIndexes().length, tojson( t.getIndexes() ) );
+assert.eq( ex, t.find( { i : 1 } ).explain() );
diff --git a/jstests/tool/dumprestoreWithNoOptions.js b/jstests/tool/dumprestoreWithNoOptions.js
index 4919e394c99..d6b87ffe70c 100644
--- a/jstests/tool/dumprestoreWithNoOptions.js
+++ b/jstests/tool/dumprestoreWithNoOptions.js
@@ -18,12 +18,13 @@ dbname2 = "NOT_"+dbname;
db.dropDatabase();
-var options = { capped: true, size: 1000, autoIndexId: true };
+var options = { capped: true, size: 4096, autoIndexId: true };
db.createCollection('capped', options);
assert.eq( 1, db.system.indexes.count(), "auto index not created" );
var cappedOptions = db.capped.exists().options;
for ( var opt in options ) {
- assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+ assert.eq(options[opt], cappedOptions[opt],
+ 'invalid option:' + tojson(options) + " " + tojson(cappedOptions));
}
db.capped.insert({ x: 1 });
db.getLastError()
@@ -40,12 +41,13 @@ t.runTool( "restore" , "--dir" , t.ext , "--noOptionsRestore");
assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
assert(true !== db.capped.stats().capped, "restore options were not ignored");
-assert(undefined === db.capped.exists().options, "restore options not ignored");
+assert(undefined === db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
// Dump/restore single DB
db.dropDatabase();
-var options = { capped: true, size: 1000, autoIndexId: true };
+var options = { capped: true, size: 4096, autoIndexId: true };
db.createCollection('capped', options);
assert.eq( 1, db.system.indexes.count(), "auto index not created" );
var cappedOptions = db.capped.exists().options;
@@ -74,7 +76,7 @@ assert(undefined === db.capped.exists().options, "restore options not ignored");
// Dump/restore single collection
db.dropDatabase();
-var options = { capped: true, size: 1000, autoIndexId: true };
+var options = { capped: true, size: 4096, autoIndexId: true };
db.createCollection('capped', options);
assert.eq( 1, db.system.indexes.count(), "auto index not created" );
var cappedOptions = db.capped.exists().options;
diff --git a/jstests/tool/dumprestore_auth2.js b/jstests/tool/dumprestore_auth2.js
index 987ce0aa59f..bc684b7a030 100644
--- a/jstests/tool/dumprestore_auth2.js
+++ b/jstests/tool/dumprestore_auth2.js
@@ -11,7 +11,8 @@ db.createUser({user: 'user',pwd: 'password', roles: jsTest.basicUserRoles});
db.createRole({role: 'role', roles: [], privileges:[]});
assert.eq(1, db.system.users.count(), "setup")
-assert.eq(2, db.system.indexes.count({ns: "admin.system.users"}), "setup2")
+assert.eq(2, db.system.indexes.count({ns: "admin.system.users"}),
+ "setup2: " + tojson( db.system.users.getIndexes() ) );
assert.eq(1, db.system.roles.count(), "setup3")
assert.eq(2, db.system.indexes.count({ns: "admin.system.roles"}), "setup4")
assert.eq(1, db.system.version.count());
diff --git a/src/mongo/SConscript b/src/mongo/SConscript
index 7f9f178f98b..721c58b52d5 100644
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
@@ -602,6 +602,7 @@ serverOnlyFiles = [ "db/curop.cpp",
"db/catalog/database_holder.cpp",
"db/background.cpp",
"db/pdfile.cpp",
+ "db/repair_database.cpp",
"db/storage/data_file.cpp",
"db/storage/extent.cpp",
"db/storage/extent_manager.cpp",
diff --git a/src/mongo/base/error_codes.err b/src/mongo/base/error_codes.err
index 36e1fd0f6dc..0e84bf0d344 100644
--- a/src/mongo/base/error_codes.err
+++ b/src/mongo/base/error_codes.err
@@ -87,5 +87,6 @@ error_code("NoProgressMade", 82)
error_code("DuplicateKey", 11000)
error_code("NotMaster", 10107) #this comes from assert_util.h
error_code("Interrupted", 11601)
+error_code("OutOfDiskSpace", 14031 )
error_class("NetworkError", ["HostUnreachable", "HostNotFound"])
diff --git a/src/mongo/db/auth/auth_index_d.cpp b/src/mongo/db/auth/auth_index_d.cpp
index 0053ffa50d5..5834b7d53c7 100644
--- a/src/mongo/db/auth/auth_index_d.cpp
+++ b/src/mongo/db/auth/auth_index_d.cpp
@@ -90,10 +90,12 @@ namespace {
}
}
- void createSystemIndexes(const NamespaceString& ns) {
+ void createSystemIndexes(Collection* collection) {
+ invariant( collection );
+ const NamespaceString& ns = collection->ns();
if (ns == AuthorizationManager::usersCollectionNamespace) {
try {
- Helpers::ensureIndex(ns.ns().c_str(),
+ Helpers::ensureIndex(collection,
v3SystemUsersKeyPattern,
true, // unique
v3SystemUsersIndexName.c_str());
@@ -107,7 +109,7 @@ namespace {
}
} else if (ns == AuthorizationManager::rolesCollectionNamespace) {
try {
- Helpers::ensureIndex(ns.ns().c_str(),
+ Helpers::ensureIndex(collection,
v3SystemRolesKeyPattern,
true, // unique
v3SystemRolesIndexName.c_str());
diff --git a/src/mongo/db/auth/auth_index_d.h b/src/mongo/db/auth/auth_index_d.h
index 63334c80e2c..24843f6f0f0 100644
--- a/src/mongo/db/auth/auth_index_d.h
+++ b/src/mongo/db/auth/auth_index_d.h
@@ -31,13 +31,16 @@
#include "mongo/db/namespace_string.h"
namespace mongo {
+
+ class Collection;
+
namespace authindex {
/**
* Creates the appropriate indexes on _new_ system collections supporting authentication and
* authorization.
*/
- void createSystemIndexes(const NamespaceString& ns);
+ void createSystemIndexes(Collection* collection);
/**
* Ensures that exactly the appropriate indexes to support authentication and authorization
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 7fbb73c6a5d..0793732a957 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/commands/server_status.h"
#include "mongo/db/curop.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/index_create.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/structure/catalog/namespace_details.h"
#include "mongo/db/repl/rs.h"
@@ -191,15 +192,37 @@ namespace mongo {
return status;
}
- StatusWith<DiskLoc> Collection::_insertDocument( const BSONObj& docToInsert, bool enforceQuota ) {
+ StatusWith<DiskLoc> Collection::insertDocument( const BSONObj& doc,
+ MultiIndexBlock& indexBlock ) {
+ StatusWith<DiskLoc> loc = _recordStore->insertRecord( doc.objdata(),
+ doc.objsize(),
+ 0 );
+
+ if ( !loc.isOK() )
+ return loc;
+
+ InsertDeleteOptions indexOptions;
+ indexOptions.logIfError = false;
+ indexOptions.dupsAllowed = true; // in repair we should be doing no checking
+
+ Status status = indexBlock.insert( doc, loc.getValue(), indexOptions );
+ if ( !status.isOK() )
+ return StatusWith<DiskLoc>( status );
+
+ return loc;
+ }
+
+
+ StatusWith<DiskLoc> Collection::_insertDocument( const BSONObj& docToInsert,
+ bool enforceQuota ) {
// TODO: for now, capped logic lives inside NamespaceDetails, which is hidden
// under the RecordStore, this feels broken since that should be a
// collection access method probably
StatusWith<DiskLoc> loc = _recordStore->insertRecord( docToInsert.objdata(),
- docToInsert.objsize(),
- enforceQuota ? largestFileNumberInQuota() : 0 );
+ docToInsert.objsize(),
+ enforceQuota ? largestFileNumberInQuota() : 0 );
if ( !loc.isOK() )
return loc;
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 9e97c070832..17b88b94bdb 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -48,6 +48,7 @@ namespace mongo {
class ExtentManager;
class NamespaceDetails;
class IndexCatalog;
+ class MultiIndexBlock;
class CollectionIterator;
class FlatIterator;
@@ -160,6 +161,8 @@ namespace mongo {
StatusWith<DiskLoc> insertDocument( const DocWriter* doc, bool enforceQuota );
+ StatusWith<DiskLoc> insertDocument( const BSONObj& doc, MultiIndexBlock& indexBlock );
+
/**
* updates the document @ oldLocation with newDoc
* if the document fits in the old space, it is put there
@@ -209,10 +212,11 @@ namespace mongo {
* - some user error checks
* - adjust padding
*/
- StatusWith<DiskLoc> _insertDocument( const BSONObj& doc, bool enforceQuota );
+ StatusWith<DiskLoc> _insertDocument( const BSONObj& doc,
+ bool enforceQuota );
void _compactExtent(const DiskLoc diskloc, int extentNumber,
- vector<IndexAccessMethod*>& indexesToInsertTo,
+ MultiIndexBlock& indexesToInsertTo,
const CompactOptions* compactOptions, CompactStats* stats );
// @return 0 for inf., otherwise a number of files
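
The insertDocument(doc, MultiIndexBlock&) overload declared above is the repair path: the record is written to the RecordStore and then fed to the in-progress bulk index builders rather than the live IndexCatalog. Condensed from the copy loop in repair_database.cpp further down (a sketch of that loop, not additional API):

    // Copy one document into the rebuilt collection and its pending indexes.
    BSONObj doc = originalCollection->docFor( loc );
    StatusWith<DiskLoc> result = tempCollection->insertDocument( doc, indexBlock );
    if ( !result.isOK() )
        return result.getStatus();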
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 81fa8eb3770..18fe275fee9 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -55,6 +55,93 @@ namespace mongo {
MONGO_EXPORT_SERVER_PARAMETER(newCollectionsUsePowerOf2Sizes, bool, true);
+ Status CollectionOptions::parse( const BSONObj& options ) {
+ reset();
+
+ BSONObjIterator i( options );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ StringData fieldName = e.fieldName();
+
+ if ( fieldName == "capped" ) {
+ capped = e.trueValue();
+ }
+ else if ( fieldName == "size" ) {
+ if ( !e.isNumber() )
+ return Status( ErrorCodes::BadValue, "size has to be a number" );
+ cappedSize = e.numberLong();
+ if ( cappedSize < 0 )
+ return Status( ErrorCodes::BadValue, "size has to be >= 0" );
+ cappedSize += 0xff;
+ cappedSize &= 0xffffffffffffff00LL;
+ if ( cappedSize < Extent::minSize() )
+ cappedSize = Extent::minSize();
+ }
+ else if ( fieldName == "max" ) {
+ if ( !e.isNumber() )
+ return Status( ErrorCodes::BadValue, "max has to be a number" );
+ cappedMaxDocs = e.numberLong();
+ if ( !NamespaceDetails::validMaxCappedDocs( &cappedMaxDocs ) )
+ return Status( ErrorCodes::BadValue,
+ "max in a capped collection has to be < 2^31 or not set" );
+ }
+ else if ( fieldName == "$nExtents" ) {
+ if ( e.type() == Array ) {
+ BSONObjIterator j( e.Obj() );
+ while ( j.more() ) {
+ BSONElement inner = j.next();
+ initialExtentSizes.push_back( inner.numberInt() );
+ }
+ }
+ else {
+ initialNumExtents = e.numberLong();
+ }
+ }
+ else if ( fieldName == "autoIndexId" ) {
+ if ( e.trueValue() )
+ autoIndexId = YES;
+ else
+ autoIndexId = NO;
+ }
+ else if ( fieldName == "flags" ) {
+ flags = e.numberInt();
+ flagsSet = true;
+ }
+ else if ( fieldName == "temp" ) {
+ temp = e.trueValue();
+ }
+ }
+
+ return Status::OK();
+ }
+
+ BSONObj CollectionOptions::toBSON() const {
+ BSONObjBuilder b;
+ if ( capped ) {
+ b.appendBool( "capped", true );
+ if ( cappedSize )
+ b.appendNumber( "size", cappedSize );
+ if ( cappedMaxDocs )
+ b.appendNumber( "max", cappedMaxDocs );
+ }
+
+ if ( initialNumExtents )
+ b.appendNumber( "$nExtents", initialNumExtents );
+ if ( !initialExtentSizes.empty() )
+ b.append( "$nExtents", initialExtentSizes );
+
+ if ( autoIndexId != DEFAULT )
+ b.appendBool( "autoIndexId", autoIndexId == YES );
+
+ if ( flagsSet )
+ b.append( "flags", flags );
+
+ if ( temp )
+ b.appendBool( "temp", true );
+
+ return b.obj();
+ }
+
void massertNamespaceNotIndex( const StringData& ns, const StringData& caller ) {
massert( 17320,
str::stream() << "cannot do " << caller
@@ -545,10 +632,21 @@ namespace mongo {
return c;
}
+ namespace {
+ int _massageExtentSize( long long size ) {
+ if ( size < Extent::minSize() )
+ return Extent::minSize();
+ if ( size > Extent::maxSize() )
+ return Extent::maxSize();
+ return static_cast<int>( size );
+ }
+ }
- Collection* Database::createCollection( const StringData& ns, bool capped,
- const BSONObj* options, bool allocateDefaultSpace ) {
- verify( _namespaceIndex.details( ns ) == NULL );
+ Collection* Database::createCollection( const StringData& ns,
+ const CollectionOptions& options,
+ bool allocateDefaultSpace,
+ bool createIdIndex ) {
+ massert( 17399, "collection already exists", _namespaceIndex.details( ns ) == NULL );
massertNamespaceNotIndex( ns, "createCollection" );
_namespaceIndex.init();
@@ -566,55 +664,74 @@ namespace mongo {
ns.size() <= Namespace::MaxNsColletionLen);
}
- audit::logCreateCollection( currentClient.get(), ns );
-
- // allocation strategy set explicitly in flags or by server-wide default
- // need to check validity before creating the collection
- int userFlags = 0;
- bool flagSet = false;
-
- if ( options && options->getField("flags").type() ) {
- uassert( 17351, "flags must be a number", options->getField("flags").isNumber() );
- userFlags = options->getField("flags").numberInt();
- flagSet = true;
- }
- if ( newCollectionsUsePowerOf2Sizes && !flagSet && !capped ) {
- userFlags = NamespaceDetails::Flag_UsePowerOf2Sizes;
- }
+ NamespaceString nss( ns );
+ uassert( 17316, "cannot create a blank collection", nss.coll() > 0 );
- _namespaceIndex.add_ns( ns, DiskLoc(), capped );
- _addNamespaceToCatalog( ns, options );
-
- // TODO: option for: allocation, indexes?
-
- StringData collectionName = nsToCollectionSubstring( ns );
- uassert( 17316, "cannot create a blank collection", collectionName.size() );
+ audit::logCreateCollection( currentClient.get(), ns );
- if ( collectionName.startsWith( "system." ) ) {
- authindex::createSystemIndexes( ns );
- }
+ _namespaceIndex.add_ns( ns, DiskLoc(), options.capped );
+ BSONObj optionsAsBSON = options.toBSON();
+ _addNamespaceToCatalog( ns, &optionsAsBSON );
Collection* collection = getCollection( ns );
- verify( collection );
+ massert( 17400, "_namespaceIndex.add_ns failed?", collection );
NamespaceDetails* nsd = collection->details();
- nsd->setUserFlag( userFlags );
- if ( allocateDefaultSpace ) {
- collection->increaseStorageSize( Extent::initialSize( 128 ), false );
+ // allocation strategy set explicitly in flags or by server-wide default
+ // need to check validity before creating the collection
+ if ( options.flagsSet ) {
+ nsd->setUserFlag( options.flags );
}
+ else if ( !options.capped ) {
+ nsd->setUserFlag( NamespaceDetails::Flag_UsePowerOf2Sizes );
+ }
+
+ if ( options.cappedMaxDocs > 0 )
+ nsd->setMaxCappedDocs( options.cappedMaxDocs );
- if ( collection->requiresIdIndex() ) {
- if ( options &&
- options->getField("autoIndexId").type() &&
- !options->getField("autoIndexId").trueValue() ) {
- // do not create
+ if ( allocateDefaultSpace ) {
+ if ( options.initialNumExtents > 0 ) {
+ int size = _massageExtentSize( options.cappedSize );
+ for ( int i = 0; i < options.initialNumExtents; i++ ) {
+ collection->increaseStorageSize( size, false );
+ }
+ }
+ else if ( !options.initialExtentSizes.empty() ) {
+ for ( size_t i = 0; i < options.initialExtentSizes.size(); i++ ) {
+ int size = options.initialExtentSizes[i];
+ size = _massageExtentSize( size );
+ collection->increaseStorageSize( size, false );
+ }
+ }
+ else if ( options.capped ) {
+ // normal
+ long long size = options.cappedSize;
+ while ( size > 0 ) {
+ int mySize = _massageExtentSize( size );
+ mySize &= 0xffffff00;
+ Extent* e = collection->increaseStorageSize( mySize, true );
+ size -= e->length;
+ }
}
else {
- uassertStatusOK( collection->getIndexCatalog()->ensureHaveIdIndex() );
+ collection->increaseStorageSize( Extent::initialSize( 128 ), false );
}
}
+ if ( createIdIndex ) {
+ if ( collection->requiresIdIndex() ) {
+ if ( options.autoIndexId == CollectionOptions::YES ||
+ options.autoIndexId == CollectionOptions::DEFAULT ) {
+ uassertStatusOK( collection->getIndexCatalog()->ensureHaveIdIndex() );
+ }
+ }
+
+ if ( nss.isSystem() ) {
+ authindex::createSystemIndexes( collection );
+ }
+
+ }
return collection;
}
@@ -629,7 +746,7 @@ namespace mongo {
BSONObjBuilder b;
b.append("name", ns);
- if ( options )
+ if ( options && !options->isEmpty() )
b.append("options", *options);
BSONObj obj = b.done();
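
A worked example of the "size" massaging in CollectionOptions::parse() above, which is also why the dumprestore tests earlier in this commit moved from size: 1000 to size: 4096: a requested size only round-trips through exists().options unchanged if it is already a 256-byte multiple at or above the minimum extent size. This sketch assumes Extent::minSize() is 4096, which is an assumption here, not stated in the diff:

    // Sketch of the arithmetic parse() applies to the "size" option.
    long long cappedSize = 1000;
    cappedSize += 0xff;                    // 1255
    cappedSize &= 0xffffffffffffff00LL;    // 1024: rounded up to a 256-byte multiple
    if ( cappedSize < 4096 )               // Extent::minSize(), assumed 4096
        cappedSize = 4096;                 // 1000 becomes 4096; 4096 stays 4096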
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index 262696ae4a2..bdeb0983bac 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -44,6 +44,51 @@ namespace mongo {
class IndexCatalog;
class IndexDetails;
+ struct CollectionOptions {
+ CollectionOptions() {
+ reset();
+ }
+
+ void reset() {
+ capped = false;
+ cappedSize = 0;
+ cappedMaxDocs = 0;
+ initialNumExtents = 0;
+ initialExtentSizes.clear();
+ autoIndexId = DEFAULT;
+ flags = 0;
+ flagsSet = false;
+ temp = false;
+ }
+
+ Status parse( const BSONObj& obj );
+ BSONObj toBSON() const;
+
+ // ----
+
+ bool capped;
+ long long cappedSize;
+ long long cappedMaxDocs;
+
+ // following 2 are mutually exclusive, can only have one set
+ long long initialNumExtents;
+ vector<long long> initialExtentSizes;
+
+ // behavior of _id index creation when collection created
+ void setNoIdIndex() { autoIndexId = NO; }
+ enum {
+ DEFAULT, // currently yes for most collections, NO for some system ones
+ YES, // create _id index
+ NO // do not create _id index
+ } autoIndexId;
+
+ // user flags
+ int flags;
+ bool flagsSet;
+
+ bool temp;
+ };
+
/**
* Database represents a database
* Each database has its own set of files -- dbname.ns, dbname.0, dbname.1, ...
@@ -120,9 +165,9 @@ namespace mongo {
Status dropCollection( const StringData& fullns );
Collection* createCollection( const StringData& ns,
- bool capped = false,
- const BSONObj* options = NULL,
- bool allocateDefaultSpace = true );
+ const CollectionOptions& options = CollectionOptions(),
+ bool allocateSpace = true,
+ bool createDefaultIndexes = true );
/**
* @param ns - this is fully qualified, which is maybe not ideal ???
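
The CollectionOptions struct above replaces ad-hoc BSONObj probing; userCreateNS (see the pdfile.cpp hunk below) now parses user-supplied options once and persists the normalized form. A sketch of that round trip, with the option document as an illustrative input:

    // Sketch: parse user options, then persist the normalized BSON form.
    CollectionOptions opts;
    Status status = opts.parse( BSON( "capped" << true << "size" << 4096 ) );
    if ( !status.isOK() )
        return status;
    BSONObj stored = opts.toBSON();   // what _addNamespaceToCatalog records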
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 59528dd0cd8..9cf89dd9d90 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -250,7 +250,12 @@ namespace mongo {
LOG(1) << "\t bulk commit starting";
std::set<DiskLoc> dupsToDrop;
- btreeState->accessMethod()->commitBulk( bulk, mayInterrupt, &dupsToDrop );
+ Status status = btreeState->accessMethod()->commitBulk( bulk,
+ mayInterrupt,
+ &dupsToDrop );
+ massert( 17398,
+ str::stream() << "commitBulk failed: " << status.toString(),
+ status.isOK() );
if ( dupsToDrop.size() )
log() << "\t bulk dropping " << dupsToDrop.size() << " dups";
@@ -277,5 +282,68 @@ namespace mongo {
collection->infoCache()->addedIndex();
}
+ // ----------------------------
+
+ MultiIndexBlock::MultiIndexBlock( Collection* collection )
+ : _collection( collection ) {
+ }
+
+ Status MultiIndexBlock::init( std::vector<BSONObj>& indexSpecs ) {
+ for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
+ BSONObj info = indexSpecs[i];
+ info = _collection->getIndexCatalog()->fixIndexSpec( info );
+
+ IndexState state;
+ state.block.reset( new IndexCatalog::IndexBuildBlock( _collection, info ) );
+ Status status = state.block->init();
+ if ( !status.isOK() )
+ return status;
+
+ state.real = state.block->getEntry()->accessMethod();
+ status = state.real->initializeAsEmpty();
+ if ( !status.isOK() )
+ return status;
+
+ state.bulk = state.real->initiateBulk();
+
+ _states.push_back( state );
+ }
+
+ return Status::OK();
+ }
+
+ Status MultiIndexBlock::insert( const BSONObj& doc,
+ const DiskLoc& loc,
+ const InsertDeleteOptions& options ) {
+
+ for ( size_t i = 0; i < _states.size(); i++ ) {
+ Status idxStatus = _states[i].forInsert()->insert( doc,
+ loc,
+ options,
+ NULL );
+ if ( !idxStatus.isOK() )
+ return idxStatus;
+ }
+ return Status::OK();
+ }
+
+ Status MultiIndexBlock::commit() {
+ for ( size_t i = 0; i < _states.size(); i++ ) {
+ if ( _states[i].bulk == NULL )
+ continue;
+ Status status = _states[i].real->commitBulk( _states[i].bulk,
+ false,
+ NULL );
+ if ( !status.isOK() )
+ return status;
+ }
+
+ for ( size_t i = 0; i < _states.size(); i++ ) {
+ _states[i].block->success();
+ }
+
+ return Status::OK();
+ }
+
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_create.h b/src/mongo/db/catalog/index_create.h
index 88aad085afd..af3e000437e 100644
--- a/src/mongo/db/catalog/index_create.h
+++ b/src/mongo/db/catalog/index_create.h
@@ -31,11 +31,18 @@
#pragma once
#include <string>
+#include <vector>
+
+#include "mongo/base/disallow_copying.h"
+#include "mongo/base/status.h"
+#include "mongo/db/diskloc.h"
+#include "mongo/db/index/index_access_method.h"
namespace mongo {
- class IndexCatalogEntry;
+ class BSONObj;
class Collection;
+ class IndexCatalogEntry;
// Build an index in the foreground
// If background is false, uses fast index builder
@@ -44,4 +51,35 @@ namespace mongo {
IndexCatalogEntry* btreeState,
bool mayInterrupt );
+ class MultiIndexBlock {
+ MONGO_DISALLOW_COPYING( MultiIndexBlock );
+ public:
+ MultiIndexBlock( Collection* collection );
+
+ Status init( std::vector<BSONObj>& specs );
+
+ Status insert( const BSONObj& doc,
+ const DiskLoc& loc,
+ const InsertDeleteOptions& options );
+
+ Status commit();
+
+ private:
+ Collection* _collection;
+
+ struct IndexState {
+ IndexState()
+ : real( NULL ), bulk( NULL ) {
+ }
+
+ IndexAccessMethod* forInsert() { return bulk ? bulk : real; }
+
+ shared_ptr<IndexCatalog::IndexBuildBlock> block;
+ IndexAccessMethod* real;
+ IndexAccessMethod* bulk;
+ };
+
+ std::vector<IndexState> _states;
+ };
+
} // namespace mongo
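
MultiIndexBlock bundles the bulk-build lifecycle: init() opens one IndexBuildBlock per spec, insert() feeds every pending builder, and commit() runs commitBulk and marks each block successful. Condensed from how repair_database.cpp drives it below (a sketch; error paths trimmed):

    // One bulk build across all of a collection's indexes.
    MultiIndexBlock indexBlock( tempCollection );
    Status status = indexBlock.init( indexSpecs );   // specs from the old IndexCatalog
    if ( !status.isOK() )
        return status;

    InsertDeleteOptions indexOptions;
    indexOptions.logIfError = false;
    indexOptions.dupsAllowed = true;                 // repair skips dup checking
    // per document: indexBlock.insert( doc, loc, indexOptions );

    status = indexBlock.commit();                    // commitBulk + success() per index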
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index acc74af6125..df12adf82ec 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -33,6 +33,7 @@
#include "mongo/base/init.h"
#include "mongo/base/status.h"
#include "mongo/bson/util/builder.h"
+#include "mongo/client/dbclientinterface.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/auth/authorization_session.h"
@@ -461,12 +462,11 @@ namespace mongo {
verify(p);
string to_name = todb + p;
- bool wantIdIndex = false;
{
string err;
const char *toname = to_name.c_str();
/* we defer building id index for performance - building it in batch is much faster */
- userCreateNS(toname, options, err, opts.logForRepl, &wantIdIndex);
+ userCreateNS(toname, options, err, opts.logForRepl, false);
}
LOG(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
Query q;
@@ -475,7 +475,7 @@ namespace mongo {
copy(context,from_name, to_name.c_str(), false, opts.logForRepl, masterSameProcess,
opts.slaveOk, opts.mayYield, opts.mayBeInterrupted, q);
- if( wantIdIndex ) {
+ {
/* we need dropDups to be true as we didn't do a true snapshot and this is before applying oplog operations
that occur during the initial sync. inDBRepair makes dropDups be true.
*/
diff --git a/src/mongo/db/cloner.h b/src/mongo/db/cloner.h
index d6f727eca7a..255c1442f0b 100644
--- a/src/mongo/db/cloner.h
+++ b/src/mongo/db/cloner.h
@@ -30,6 +30,7 @@
#pragma once
+#include "mongo/client/dbclientinterface.h"
#include "mongo/db/client.h"
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 49d15bd20b5..01fe92487c2 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -163,7 +163,7 @@ namespace mongo {
Collection* collection = db->getCollection( ns.ns() );
result.appendBool( "createdCollectionAutomatically", collection == NULL );
if ( !collection ) {
- collection = db->createCollection( ns.ns(), false, NULL );
+ collection = db->createCollection( ns.ns() );
invariant( collection );
}
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 535f5ef9bb0..3b40a72ede3 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -342,13 +342,15 @@ namespace mongo {
Client::WriteContext incCtx( _config.incLong );
Collection* incColl = incCtx.ctx().db()->getCollection( _config.incLong );
if ( !incColl ) {
- const BSONObj options = BSON( "autoIndexId" << false << "temp" << true );
- incColl = incCtx.ctx().db()->createCollection( _config.incLong, false,
- &options, true );
+ CollectionOptions options;
+ options.setNoIdIndex();
+ options.temp = true;
+ incColl = incCtx.ctx().db()->createCollection( _config.incLong, options );
+
// Log the createCollection operation.
BSONObjBuilder b;
b.append( "create", nsToCollectionSubstring( _config.incLong ));
- b.appendElements( options );
+ b.appendElements( options.toBSON() );
string logNs = nsToDatabase( _config.incLong ) + ".$cmd";
logOp( "c", logNs.c_str(), b.obj() );
}
@@ -400,13 +402,14 @@ namespace mongo {
Client::WriteContext tempCtx( _config.tempNamespace );
Collection* tempColl = tempCtx.ctx().db()->getCollection( _config.tempNamespace );
if ( !tempColl ) {
- const BSONObj options = BSON( "temp" << true );
- tempColl = tempCtx.ctx().db()->createCollection( _config.tempNamespace, false,
- &options, true );
+ CollectionOptions options;
+ options.temp = true;
+ tempColl = tempCtx.ctx().db()->createCollection( _config.tempNamespace, options );
+
// Log the createCollection operation.
BSONObjBuilder b;
b.append( "create", nsToCollectionSubstring( _config.tempNamespace ));
- b.appendElements( options );
+ b.appendElements( options.toBSON() );
string logNs = nsToDatabase( _config.tempNamespace ) + ".$cmd";
logOp( "c", logNs.c_str(), b.obj() );
}
diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp
index c9526c4a7a3..b023d876f23 100644
--- a/src/mongo/db/commands/rename_collection.cpp
+++ b/src/mongo/db/commands/rename_collection.cpp
@@ -216,11 +216,10 @@ namespace mongo {
targetColl = ctx.db()->getCollection( target );
}
else {
- BSONObjBuilder spec;
- spec.appendBool( "autoIndexId", false );
- const BSONObj options = spec.obj();
+ CollectionOptions options;
+ options.setNoIdIndex();
// No logOp necessary because the entire renameCollection command is one logOp.
- targetColl = ctx.db()->createCollection( target, false, &options, true );
+ targetColl = ctx.db()->createCollection( target, options );
}
if ( !targetColl ) {
errmsg = "Failed to create target collection.";
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index bde40b1176e..9a718d02b92 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -60,9 +60,9 @@
#include "mongo/db/kill_current_op.h"
#include "mongo/db/log_process_details.h"
#include "mongo/db/mongod_options.h"
-#include "mongo/db/pdfile.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/range_deleter_service.h"
+#include "mongo/db/repair_database.h"
#include "mongo/db/repl/repl_start.h"
#include "mongo/db/repl/replication_server_status.h"
#include "mongo/db/repl/rs.h"
@@ -296,7 +296,7 @@ namespace mongo {
}
- bool doDBUpgrade( const string& dbName , string errmsg , DataFileHeader * h ) {
+ void doDBUpgrade( const string& dbName, DataFileHeader* h ) {
static DBDirectClient db;
if ( h->version == 4 && h->versionMinor == 4 ) {
@@ -310,18 +310,17 @@ namespace mongo {
BSONObj out;
bool ok = db.runCommand( dbName , BSON( "reIndex" << c.substr( dbName.size() + 1 ) ) , out );
if ( ! ok ) {
- errmsg = "reindex failed";
- log() << "\t\t reindex failed: " << out << endl;
- return false;
+ log() << "\t\t reindex failed: " << out;
+ fassertFailed( 17393 );
}
}
getDur().writingInt(h->versionMinor) = 5;
- return true;
+ return;
}
// do this in the general case
- return repairDatabase( dbName.c_str(), errmsg );
+ fassert( 17401, repairDatabase( dbName ) );
}
void checkForIdIndexes( Database* db ) {
@@ -402,8 +401,7 @@ namespace mongo {
if (mongodGlobalParams.upgrade) {
// QUESTION: Repair even if file format is higher version than code?
- string errmsg;
- verify( doDBUpgrade( dbName , errmsg , h ) );
+ doDBUpgrade( dbName, h );
}
else {
log() << "\t Not upgrading, exiting" << endl;
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index c491a37ce2d..158c3f83df0 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -65,6 +65,7 @@
#include "mongo/db/query/get_runner.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/query/query_planner.h"
+#include "mongo/db/repair_database.h"
#include "mongo/db/repl/is_master.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/write_concern.h"
@@ -246,12 +247,12 @@ namespace mongo {
bool preserveClonedFilesOnFailure = e.isBoolean() && e.boolean();
e = cmdObj.getField( "backupOriginalFiles" );
bool backupOriginalFiles = e.isBoolean() && e.boolean();
- bool ok =
- repairDatabase( dbname, errmsg, preserveClonedFilesOnFailure, backupOriginalFiles );
+ Status status =
+ repairDatabase( dbname, preserveClonedFilesOnFailure, backupOriginalFiles );
IndexBuilder::restoreIndexes(indexesInProg);
- return ok;
+ return appendCommandStatus( result, status );
}
} cmdRepairDatabase;
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 6dd426ecef6..33e3d2b3946 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -67,9 +67,14 @@ namespace mongo {
if ( !collection )
return;
+ ensureIndex( collection, keyPattern, unique, name );
+ }
+
+ void Helpers::ensureIndex(Collection* collection,
+ BSONObj keyPattern, bool unique, const char *name) {
BSONObjBuilder b;
b.append("name", name);
- b.append("ns", ns);
+ b.append("ns", collection->ns());
b.append("key", keyPattern);
b.appendBool("unique", unique);
BSONObj o = b.done();
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index ce7b85f3968..a8aebc58c25 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -65,6 +65,10 @@ namespace mongo {
*/
static void ensureIndex(const char *ns, BSONObj keyPattern, bool unique, const char *name);
+ // same as other ensureIndex
+ static void ensureIndex(Collection* collection,
+ BSONObj keyPattern, bool unique, const char *name);
+
/* fetch a single object from collection ns that matches query.
set your db SavedContext first.
diff --git a/src/mongo/db/durop.cpp b/src/mongo/db/durop.cpp
index 6f32295aed7..3a7fadf753e 100644
--- a/src/mongo/db/durop.cpp
+++ b/src/mongo/db/durop.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/durop.h"
#include "mongo/db/d_concurrency.h"
+#include "mongo/db/repair_database.h"
#include "mongo/db/storage/durable_mapped_file.h"
#include "mongo/util/alignedbuilder.h"
#include "mongo/util/file.h"
@@ -45,8 +46,6 @@ using namespace mongoutils;
namespace mongo {
- void _deleteDataFiles(const char *);
-
namespace dur {
/** read a durop from journal file referenced by br.
@@ -91,7 +90,7 @@ namespace mongo {
/** throws */
void DropDbOp::replay() {
log() << "recover replay drop db " << _db << endl;
- _deleteDataFiles(_db.c_str());
+ _deleteDataFiles(_db);
}
FileCreatedOp::FileCreatedOp(const std::string& f, unsigned long long l) :
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 5c9a6970a13..1719706451c 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -68,6 +68,7 @@ _ disallow system* manipulations from the database.
#include "mongo/db/lasterror.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/ops/delete.h"
+#include "mongo/db/repair_database.h"
#include "mongo/db/repl/is_master.h"
#include "mongo/db/sort_phase_one.h"
#include "mongo/db/repl/oplog.h"
@@ -83,24 +84,10 @@ _ disallow system* manipulations from the database.
namespace mongo {
- // TODO SERVER-4328
- bool inDBRepair = false;
- struct doingRepair {
- doingRepair() {
- verify( ! inDBRepair );
- inDBRepair = true;
- }
- ~doingRepair() {
- inDBRepair = false;
- }
- };
-
/* ----------------------------------------- */
- const char FREELIST_NS[] = ".$freelist";
string pidfilepath;
DatabaseHolder _dbHolder;
- int MAGIC = 0x1000;
DatabaseHolder& dbHolderUnchecked() {
return _dbHolder;
@@ -131,41 +118,18 @@ namespace mongo {
/*---------------------------------------------------------------------*/
- // inheritable class to implement an operation that may be applied to all
- // files in a database using _applyOpToDataFiles()
- class FileOp {
- public:
- virtual ~FileOp() {}
- // Return true if file exists and operation successful
- virtual bool apply( const boost::filesystem::path &p ) = 0;
- virtual const char * op() const = 0;
- };
-
- void _applyOpToDataFiles(const char *database, FileOp &fo, bool afterAllocator = false,
- const string& path = storageGlobalParams.dbpath);
-
- void _deleteDataFiles(const char *database) {
- if (storageGlobalParams.directoryperdb) {
- FileAllocator::get()->waitUntilFinished();
- MONGO_ASSERT_ON_EXCEPTION_WITH_MSG(
- boost::filesystem::remove_all(
- boost::filesystem::path(storageGlobalParams.dbpath) / database),
- "delete data files with a directoryperdb");
- return;
- }
- class : public FileOp {
- virtual bool apply( const boost::filesystem::path &p ) {
- return boost::filesystem::remove( p );
- }
- virtual const char * op() const {
- return "remove";
- }
- } deleter;
- _applyOpToDataFiles( database, deleter, true );
- }
+ /** { ..., capped: true, size: ..., max: ... }
+ * @param createDefaultIndexes - if false, defers id (and other) index creation.
+ * @return true if successful
+ */
+ bool userCreateNS(const char *ns, BSONObj options, string& err,
+ bool logForReplication, bool createDefaultIndexes ) {
+
+ LOG(1) << "create collection " << ns << ' ' << options;
- bool _userCreateNS(const char *ns, const BSONObj& options, string& err, bool *deferIdIndex) {
- LOG(1) << "create collection " << ns << ' ' << options << endl;
+ massert(10356 ,
+ str::stream() << "invalid ns: " << ns,
+ NamespaceString::validCollectionComponent(ns));
Database* db = cc().database();
@@ -176,139 +140,28 @@ namespace mongo {
return false;
}
- long long size = Extent::initialSize(128);
- {
- BSONElement e = options.getField("size");
- if ( e.isNumber() ) {
- size = e.numberLong();
- uassert( 10083 , "create collection invalid size spec", size >= 0 );
-
- size += 0xff;
- size &= 0xffffffffffffff00LL;
- if ( size < Extent::minSize() )
- size = Extent::minSize();
- }
- }
-
- bool newCapped = false;
- long long mx = 0;
- if( options["capped"].trueValue() ) {
- newCapped = true;
- BSONElement e = options.getField("max");
- if ( e.isNumber() ) {
- mx = e.numberLong();
- uassert( 16495,
- "max in a capped collection has to be < 2^31 or not set",
- NamespaceDetails::validMaxCappedDocs(&mx) );
- }
- }
-
-
- collection = db->createCollection( ns,
- options["capped"].trueValue(),
- &options,
- false ); // we do it ourselves below
- verify( collection );
-
- // $nExtents just for debug/testing.
- BSONElement e = options.getField( "$nExtents" );
-
- if ( e.type() == Array ) {
- // We create one extent per array entry, with size specified
- // by the array value.
- BSONObjIterator i( e.embeddedObject() );
- while( i.more() ) {
- BSONElement e = i.next();
- int size = int( e.number() );
- verify( size <= 0x7fffffff );
- // $nExtents is just for testing - always allocate new extents
- // rather than reuse existing extents so we have some predictibility
- // in the extent size used by our tests
- collection->increaseStorageSize( (int)size, false );
- }
- }
- else if ( int( e.number() ) > 0 ) {
- // We create '$nExtents' extents, each of size 'size'.
- int nExtents = int( e.number() );
- verify( size <= 0x7fffffff );
- for ( int i = 0; i < nExtents; ++i ) {
- verify( size <= 0x7fffffff );
- // $nExtents is just for testing - always allocate new extents
- // rather than reuse existing extents so we have some predictibility
- // in the extent size used by our tests
- collection->increaseStorageSize( (int)size, false );
- }
- }
- else {
- // This is the non test case, where we don't have a $nExtents spec.
- while ( size > 0 ) {
- const int max = Extent::maxSize();
- const int min = Extent::minSize();
- int desiredExtentSize = static_cast<int> (size > max ? max : size);
- desiredExtentSize = static_cast<int> (desiredExtentSize < min ? min : desiredExtentSize);
-
- desiredExtentSize &= 0xffffff00;
- Extent* e = collection->increaseStorageSize( (int)desiredExtentSize, true );
- size -= e->length;
- }
- }
-
- NamespaceDetails *d = nsdetails(ns);
- verify(d);
-
- bool ensure = true;
-
- // respect autoIndexId if set. otherwise, create an _id index for all colls, except for
- // capped ones in local w/o autoIndexID (reason for the exception is for the oplog and
- // non-replicated capped colls)
- if( options.hasField( "autoIndexId" ) ||
- (newCapped && nsToDatabase( ns ) == "local" ) ) {
- ensure = options.getField( "autoIndexId" ).trueValue();
- }
-
- if( ensure ) {
- if( deferIdIndex )
- *deferIdIndex = true;
- else
- ensureIdIndexForNewNs( collection );
+ CollectionOptions collectionOptions;
+ Status status = collectionOptions.parse( options );
+ if ( !status.isOK() ) {
+ err = status.toString();
+ return false;
}
- if ( mx > 0 )
- d->setMaxCappedDocs( mx );
-
- return true;
- }
-
- /** { ..., capped: true, size: ..., max: ... }
- @param deferIdIndex - if not null, defers id index creation. sets the bool value to true if we wanted to create the id index.
- @return true if successful
- */
- bool userCreateNS(const char *ns, BSONObj options, string& err, bool logForReplication, bool *deferIdIndex) {
- const char *coll = strchr( ns, '.' ) + 1;
- massert(10356 ,
- str::stream() << "invalid ns: " << ns,
- NamespaceString::validCollectionComponent(ns));
+ invariant( db->createCollection( ns, collectionOptions, true, createDefaultIndexes ) );
- bool ok = _userCreateNS(ns, options, err, deferIdIndex);
- if ( logForReplication && ok ) {
+ if ( logForReplication ) {
if ( options.getField( "create" ).eoo() ) {
BSONObjBuilder b;
- b << "create" << coll;
+ b << "create" << nsToCollectionSubstring( ns );
b.appendElements( options );
options = b.obj();
}
string logNs = nsToDatabase(ns) + ".$cmd";
logOp("c", logNs.c_str(), options);
}
- return ok;
+ return true;
}
-}
-
-#include "clientcursor.h"
-
-namespace mongo {
-
void dropAllDatabasesExceptLocal() {
Lock::GlobalWrite lk;
@@ -347,231 +200,7 @@ namespace mongo {
Database::closeDatabase( d->name(), d->path() );
d = 0; // d is now deleted
- _deleteDataFiles( db.c_str() );
- }
-
- typedef boost::filesystem::path Path;
-
- void boostRenameWrapper( const Path &from, const Path &to ) {
- try {
- boost::filesystem::rename( from, to );
- }
- catch ( const boost::filesystem::filesystem_error & ) {
- // boost rename doesn't work across partitions
- boost::filesystem::copy_file( from, to);
- boost::filesystem::remove( from );
- }
- }
-
- // back up original database files to 'temp' dir
- void _renameForBackup( const char *database, const Path &reservedPath ) {
- Path newPath( reservedPath );
- if (storageGlobalParams.directoryperdb)
- newPath /= database;
- class Renamer : public FileOp {
- public:
- Renamer( const Path &newPath ) : newPath_( newPath ) {}
- private:
- const boost::filesystem::path &newPath_;
- virtual bool apply( const Path &p ) {
- if ( !boost::filesystem::exists( p ) )
- return false;
- boostRenameWrapper( p, newPath_ / ( p.leaf().string() + ".bak" ) );
- return true;
- }
- virtual const char * op() const {
- return "renaming";
- }
- } renamer( newPath );
- _applyOpToDataFiles( database, renamer, true );
- }
-
- // move temp files to standard data dir
- void _replaceWithRecovered( const char *database, const char *reservedPathString ) {
- Path newPath(storageGlobalParams.dbpath);
- if (storageGlobalParams.directoryperdb)
- newPath /= database;
- class Replacer : public FileOp {
- public:
- Replacer( const Path &newPath ) : newPath_( newPath ) {}
- private:
- const boost::filesystem::path &newPath_;
- virtual bool apply( const Path &p ) {
- if ( !boost::filesystem::exists( p ) )
- return false;
- boostRenameWrapper( p, newPath_ / p.leaf() );
- return true;
- }
- virtual const char * op() const {
- return "renaming";
- }
- } replacer( newPath );
- _applyOpToDataFiles( database, replacer, true, reservedPathString );
- }
-
- // generate a directory name for storing temp data files
- Path uniqueReservedPath( const char *prefix ) {
- Path repairPath = Path(storageGlobalParams.repairpath);
- Path reservedPath;
- int i = 0;
- bool exists = false;
- do {
- stringstream ss;
- ss << prefix << "_repairDatabase_" << i++;
- reservedPath = repairPath / ss.str();
- MONGO_ASSERT_ON_EXCEPTION( exists = boost::filesystem::exists( reservedPath ) );
- }
- while ( exists );
- return reservedPath;
- }
-
- boost::intmax_t dbSize( const char *database ) {
- class SizeAccumulator : public FileOp {
- public:
- SizeAccumulator() : totalSize_( 0 ) {}
- boost::intmax_t size() const {
- return totalSize_;
- }
- private:
- virtual bool apply( const boost::filesystem::path &p ) {
- if ( !boost::filesystem::exists( p ) )
- return false;
- totalSize_ += boost::filesystem::file_size( p );
- return true;
- }
- virtual const char *op() const {
- return "checking size";
- }
- boost::intmax_t totalSize_;
- };
- SizeAccumulator sa;
- _applyOpToDataFiles( database, sa );
- return sa.size();
- }
-
- bool repairDatabase( string dbNameS , string &errmsg,
- bool preserveClonedFilesOnFailure, bool backupOriginalFiles ) {
- doingRepair dr;
- dbNameS = nsToDatabase( dbNameS );
- const char * dbName = dbNameS.c_str();
-
- stringstream ss;
- ss << "localhost:" << serverGlobalParams.port;
- string localhost = ss.str();
-
- problem() << "repairDatabase " << dbName << endl;
- verify( cc().database()->name() == dbName );
- verify(cc().database()->path() == storageGlobalParams.dbpath);
-
- BackgroundOperation::assertNoBgOpInProgForDb(dbName);
-
- getDur().syncDataAndTruncateJournal(); // Must be done before and after repair
-
- boost::intmax_t totalSize = dbSize( dbName );
- boost::intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath);
- if ( freeSize > -1 && freeSize < totalSize ) {
- stringstream ss;
- ss << "Cannot repair database " << dbName << " having size: " << totalSize
- << " (bytes) because free disk space is: " << freeSize << " (bytes)";
- errmsg = ss.str();
- problem() << errmsg << endl;
- return false;
- }
-
- killCurrentOp.checkForInterrupt();
-
- Path reservedPath =
- uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles ) ?
- "backup" : "_tmp" );
- MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::create_directory( reservedPath ) );
- string reservedPathString = reservedPath.string();
-
- bool res;
- {
- // clone to temp location, which effectively does repair
- Client::Context ctx( dbName, reservedPathString );
- verify( ctx.justCreated() );
-
-
- CloneOptions cloneOptions;
- cloneOptions.fromDB = dbName;
- cloneOptions.logForRepl = false;
- cloneOptions.slaveOk = false;
- cloneOptions.useReplAuth = false;
- cloneOptions.snapshot = false;
- cloneOptions.mayYield = false;
- cloneOptions.mayBeInterrupted = true;
- res = Cloner::cloneFrom(ctx, localhost, cloneOptions, errmsg );
-
- Database::closeDatabase( dbName, reservedPathString.c_str() );
- }
-
- getDur().syncDataAndTruncateJournal(); // Must be done before and after repair
- MongoFile::flushAll(true); // need both in case journaling is disabled
-
- if ( !res ) {
- errmsg = str::stream() << "clone failed for " << dbName << " with error: " << errmsg;
- problem() << errmsg << endl;
-
- if ( !preserveClonedFilesOnFailure )
- MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );
-
- return false;
- }
-
- Client::Context ctx( dbName );
- Database::closeDatabase(dbName, storageGlobalParams.dbpath);
-
- if ( backupOriginalFiles ) {
- _renameForBackup( dbName, reservedPath );
- }
- else {
- _deleteDataFiles( dbName );
- MONGO_ASSERT_ON_EXCEPTION(
- boost::filesystem::create_directory(Path(storageGlobalParams.dbpath) / dbName));
- }
-
- _replaceWithRecovered( dbName, reservedPathString.c_str() );
-
- if ( !backupOriginalFiles )
- MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );
-
- return true;
- }
-
- void _applyOpToDataFiles( const char *database, FileOp &fo, bool afterAllocator, const string& path ) {
- if ( afterAllocator )
- FileAllocator::get()->waitUntilFinished();
- string c = database;
- c += '.';
- boost::filesystem::path p(path);
- if (storageGlobalParams.directoryperdb)
- p /= database;
- boost::filesystem::path q;
- q = p / (c+"ns");
- bool ok = false;
- MONGO_ASSERT_ON_EXCEPTION( ok = fo.apply( q ) );
- if ( ok ) {
- LOG(2) << fo.op() << " file " << q.string() << endl;
- }
- int i = 0;
- int extra = 10; // should not be necessary, this is defensive in case there are missing files
- while ( 1 ) {
- verify( i <= DiskLoc::MaxFiles );
- stringstream ss;
- ss << c << i;
- q = p / ss.str();
- MONGO_ASSERT_ON_EXCEPTION( ok = fo.apply(q) );
- if ( ok ) {
- if ( extra != 10 ) {
- LOG(1) << fo.op() << " file " << q.string() << endl;
- log() << " _applyOpToDataFiles() warning: extra == " << extra << endl;
- }
- }
- else if ( --extra <= 0 )
- break;
- i++;
- }
+ _deleteDataFiles( db );
}
} // namespace mongo
diff --git a/src/mongo/db/pdfile.h b/src/mongo/db/pdfile.h
index ad79a8f59ff..ec38a2523d2 100644
--- a/src/mongo/db/pdfile.h
+++ b/src/mongo/db/pdfile.h
@@ -58,14 +58,12 @@ namespace mongo {
class Record;
void dropDatabase(const std::string& db);
- bool repairDatabase(string db, string &errmsg, bool preserveClonedFilesOnFailure = false, bool backupOriginalFiles = false);
- bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication, bool *deferIdIndex = 0);
+ bool userCreateNS(const char *ns, BSONObj j, string& err,
+ bool logForReplication, bool createDefaultIndexes = true );
/*---------------------------------------------------------------------*/
- boost::intmax_t dbSize( const char *database );
-
inline NamespaceIndex* nsindex(const StringData& ns) {
Database *database = cc().database();
verify( database );
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
new file mode 100644
index 00000000000..06fdfe8cf03
--- /dev/null
+++ b/src/mongo/db/repair_database.cpp
@@ -0,0 +1,428 @@
+// repair_database.cpp
+
+/**
+* Copyright (C) 2014 MongoDB Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*
+* As a special exception, the copyright holders give permission to link the
+* code of portions of this program with the OpenSSL library under certain
+* conditions as described in each individual source file and distribute
+* linked combinations including the program with the OpenSSL library. You
+* must comply with the GNU Affero General Public License in all respects for
+* all of the code used other than as permitted herein. If you modify file(s)
+* with this exception, you may extend this exception to your version of the
+* file(s), but you are not obligated to do so. If you do not wish to do so,
+* delete this exception statement from your version. If you delete this
+* exception statement from all source files in the program, then also delete
+* it in the license file.
+*/
+
+#include "mongo/db/repair_database.h"
+
+#include <boost/filesystem/operations.hpp>
+
+#include "mongo/db/background.h"
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/database_holder.h"
+#include "mongo/db/catalog/index_create.h"
+#include "mongo/db/client.h"
+#include "mongo/db/cloner.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/kill_current_op.h"
+#include "mongo/db/structure/collection_iterator.h"
+#include "mongo/util/file.h"
+#include "mongo/util/file_allocator.h"
+
+namespace mongo {
+
+ typedef boost::filesystem::path Path;
+
+ // TODO SERVER-4328
+ bool inDBRepair = false;
+ struct doingRepair {
+ doingRepair() {
+ verify( ! inDBRepair );
+ inDBRepair = true;
+ }
+ ~doingRepair() {
+ inDBRepair = false;
+ }
+ };
+
+ // inheritable class to implement an operation that may be applied to all
+ // files in a database using _applyOpToDataFiles()
+ class FileOp {
+ public:
+ virtual ~FileOp() {}
+ // Return true if file exists and operation successful
+ virtual bool apply( const boost::filesystem::path &p ) = 0;
+ virtual const char * op() const = 0;
+ };
+
+ void _applyOpToDataFiles(const string& database, FileOp &fo, bool afterAllocator = false,
+ const string& path = storageGlobalParams.dbpath);
+
+ void _deleteDataFiles(const std::string& database) {
+ if (storageGlobalParams.directoryperdb) {
+ FileAllocator::get()->waitUntilFinished();
+ MONGO_ASSERT_ON_EXCEPTION_WITH_MSG(
+ boost::filesystem::remove_all(
+ boost::filesystem::path(storageGlobalParams.dbpath) / database),
+ "delete data files with a directoryperdb");
+ return;
+ }
+ class : public FileOp {
+ virtual bool apply( const boost::filesystem::path &p ) {
+ return boost::filesystem::remove( p );
+ }
+ virtual const char * op() const {
+ return "remove";
+ }
+ } deleter;
+ _applyOpToDataFiles( database, deleter, true );
+ }
+
+ void boostRenameWrapper( const Path &from, const Path &to ) {
+ try {
+ boost::filesystem::rename( from, to );
+ }
+ catch ( const boost::filesystem::filesystem_error & ) {
+ // boost rename doesn't work across partitions
+ boost::filesystem::copy_file( from, to);
+ boost::filesystem::remove( from );
+ }
+ }
+
+ // back up original database files to 'temp' dir
+ void _renameForBackup( const std::string& database, const Path &reservedPath ) {
+ Path newPath( reservedPath );
+ if (storageGlobalParams.directoryperdb)
+ newPath /= database;
+ class Renamer : public FileOp {
+ public:
+ Renamer( const Path &newPath ) : newPath_( newPath ) {}
+ private:
+ const boost::filesystem::path &newPath_;
+ virtual bool apply( const Path &p ) {
+ if ( !boost::filesystem::exists( p ) )
+ return false;
+ boostRenameWrapper( p, newPath_ / ( p.leaf().string() + ".bak" ) );
+ return true;
+ }
+ virtual const char * op() const {
+ return "renaming";
+ }
+ } renamer( newPath );
+ _applyOpToDataFiles( database, renamer, true );
+ }
+
+ boost::intmax_t dbSize( const string& database ) {
+ class SizeAccumulator : public FileOp {
+ public:
+ SizeAccumulator() : totalSize_( 0 ) {}
+ boost::intmax_t size() const {
+ return totalSize_;
+ }
+ private:
+ virtual bool apply( const boost::filesystem::path &p ) {
+ if ( !boost::filesystem::exists( p ) )
+ return false;
+ totalSize_ += boost::filesystem::file_size( p );
+ return true;
+ }
+ virtual const char *op() const {
+ return "checking size";
+ }
+ boost::intmax_t totalSize_;
+ };
+ SizeAccumulator sa;
+ _applyOpToDataFiles( database, sa );
+ return sa.size();
+ }
+
+ // move temp files to standard data dir
+ void _replaceWithRecovered( const string& database, const char *reservedPathString ) {
+ Path newPath(storageGlobalParams.dbpath);
+ if (storageGlobalParams.directoryperdb)
+ newPath /= database;
+ class Replacer : public FileOp {
+ public:
+ Replacer( const Path &newPath ) : newPath_( newPath ) {}
+ private:
+ const boost::filesystem::path &newPath_;
+ virtual bool apply( const Path &p ) {
+ if ( !boost::filesystem::exists( p ) )
+ return false;
+ boostRenameWrapper( p, newPath_ / p.leaf() );
+ return true;
+ }
+ virtual const char * op() const {
+ return "renaming";
+ }
+ } replacer( newPath );
+ _applyOpToDataFiles( database, replacer, true, reservedPathString );
+ }
+
+ // generate a directory name for storing temp data files
+ Path uniqueReservedPath( const char *prefix ) {
+ Path repairPath = Path(storageGlobalParams.repairpath);
+ Path reservedPath;
+ int i = 0;
+ bool exists = false;
+ do {
+ stringstream ss;
+ ss << prefix << "_repairDatabase_" << i++;
+ reservedPath = repairPath / ss.str();
+ MONGO_ASSERT_ON_EXCEPTION( exists = boost::filesystem::exists( reservedPath ) );
+ }
+ while ( exists );
+ return reservedPath;
+ }
+
+ void _applyOpToDataFiles( const string& database, FileOp &fo, bool afterAllocator, const string& path ) {
+ if ( afterAllocator )
+ FileAllocator::get()->waitUntilFinished();
+ string c = database;
+ c += '.';
+ boost::filesystem::path p(path);
+ if (storageGlobalParams.directoryperdb)
+ p /= database;
+ boost::filesystem::path q;
+ q = p / (c+"ns");
+ bool ok = false;
+ MONGO_ASSERT_ON_EXCEPTION( ok = fo.apply( q ) );
+ if ( ok ) {
+ LOG(2) << fo.op() << " file " << q.string() << endl;
+ }
+ int i = 0;
+ int extra = 10; // should not be necessary, this is defensive in case there are missing files
+ while ( 1 ) {
+ verify( i <= DiskLoc::MaxFiles );
+ stringstream ss;
+ ss << c << i;
+ q = p / ss.str();
+ MONGO_ASSERT_ON_EXCEPTION( ok = fo.apply(q) );
+ if ( ok ) {
+ if ( extra != 10 ) {
+ LOG(1) << fo.op() << " file " << q.string() << endl;
+ log() << " _applyOpToDataFiles() warning: extra == " << extra << endl;
+ }
+ }
+ else if ( --extra <= 0 )
+ break;
+ i++;
+ }
+ }
+
+ class RepairFileDeleter {
+ public:
+ RepairFileDeleter( const Path& path )
+ : _path( path ),
+ _succces( false ) {
+ }
+
+ ~RepairFileDeleter() {
+ if ( !_succces ) {
+ MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( _path ) );
+ }
+ }
+
+ void success() {
+ _succces = true;
+ }
+
+ private:
+ Path _path;
+ bool _succces;
+ };
+
+ Status repairDatabase( string dbName,
+ bool preserveClonedFilesOnFailure,
+ bool backupOriginalFiles ) {
+ scoped_ptr<RepairFileDeleter> repairFileDeleter;
+ doingRepair dr;
+ dbName = nsToDatabase( dbName );
+
+ log() << "repairDatabase " << dbName << endl;
+
+ invariant( cc().database()->name() == dbName );
+ invariant( cc().database()->path() == storageGlobalParams.dbpath );
+
+ BackgroundOperation::assertNoBgOpInProgForDb(dbName);
+
+ getDur().syncDataAndTruncateJournal(); // Must be done before and after repair
+
+ boost::intmax_t totalSize = dbSize( dbName );
+ boost::intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath);
+
+ if ( freeSize > -1 && freeSize < totalSize ) {
+ return Status( ErrorCodes::OutOfDiskSpace,
+ str::stream() << "Cannot repair database " << dbName
+ << " having size: " << totalSize
+ << " (bytes) because free disk space is: " << freeSize << " (bytes)" );
+ }
+
+ killCurrentOp.checkForInterrupt();
+
+ Path reservedPath =
+ uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles ) ?
+ "backup" : "_tmp" );
+ MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::create_directory( reservedPath ) );
+ string reservedPathString = reservedPath.string();
+
+ if ( !preserveClonedFilesOnFailure )
+ repairFileDeleter.reset( new RepairFileDeleter( reservedPath ) );
+
+ {
+ Database* originalDatabase = dbHolder().get( dbName, storageGlobalParams.dbpath );
+ if ( originalDatabase == NULL )
+ return Status( ErrorCodes::NamespaceNotFound, "database does not exist to repair" );
+
+ Database* tempDatabase = NULL;
+ {
+ bool justCreated = false;
+ tempDatabase = dbHolderW().getOrCreate( dbName, reservedPathString, justCreated );
+ invariant( justCreated );
+ }
+
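+ // Scan <dbName>.system.namespaces and record each user collection and
+ // its creation options; system and non-normal namespaces are skipped.
+ // The recorded collections are then copied one at a time below.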
+ map<string,CollectionOptions> namespacesToCopy;
+ {
+ string ns = dbName + ".system.namespaces";
+ Client::Context ctx( ns );
+ Collection* coll = originalDatabase->getCollection( ns );
+ if ( coll ) {
+ scoped_ptr<CollectionIterator> it( coll->getIterator( DiskLoc(),
+ false,
+ CollectionScanParams::FORWARD ) );
+ while ( !it->isEOF() ) {
+ DiskLoc loc = it->getNext();
+ BSONObj obj = coll->docFor( loc );
+
+ string collNs = obj["name"].String();
+
+ NamespaceString nss( collNs );
+ if ( nss.isSystem() ) {
+ if ( nss.isSystemDotIndexes() )
+ continue;
+ if ( nss.coll() == "system.namespaces" )
+ continue;
+ }
+
+ if ( !nss.isNormal() )
+ continue;
+
+ CollectionOptions options;
+ if ( obj["options"].isABSONObj() ) {
+ Status status = options.parse( obj["options"].Obj() );
+ if ( !status.isOK() )
+ return status;
+ }
+ namespacesToCopy[collNs] = options;
+ }
+ }
+ }
+
+ for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
+ i != namespacesToCopy.end();
+ ++i ) {
+ string ns = i->first;
+ CollectionOptions options = i->second;
+
+ Collection* tempCollection = NULL;
+ {
+ Client::Context tempContext( ns, tempDatabase );
+ tempCollection = tempDatabase->createCollection( ns, options, true, false );
+ }
+
+ Client::Context readContext( ns, originalDatabase );
+ Collection* originalCollection = originalDatabase->getCollection( ns );
+ invariant( originalCollection );
+
+ // Copy this collection's data, rebuilding its indexes as the
+ // documents are inserted.
+
+ MultiIndexBlock indexBlock( tempCollection );
+ {
+ vector<BSONObj> indexes;
+ IndexCatalog::IndexIterator ii =
+ originalCollection->getIndexCatalog()->getIndexIterator( false );
+ while ( ii.more() ) {
+ IndexDescriptor* desc = ii.next();
+ indexes.push_back( desc->infoObj() );
+ }
+
+ Client::Context tempContext( ns, tempDatabase );
+ Status status = indexBlock.init( indexes );
+ if ( !status.isOK() )
+ return status;
+
+ }
+
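+ // Copy every document in disk order; insertDocument also feeds each
+ // document through the in-progress index builds via indexBlock.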
+ scoped_ptr<CollectionIterator> iterator( originalCollection->getIterator( DiskLoc(),
+ false,
+ CollectionScanParams::FORWARD ) );
+ while ( !iterator->isEOF() ) {
+ DiskLoc loc = iterator->getNext();
+ invariant( !loc.isNull() );
+
+ BSONObj doc = originalCollection->docFor( loc );
+
+ Client::Context tempContext( ns, tempDatabase );
+ StatusWith<DiskLoc> result = tempCollection->insertDocument( doc, indexBlock );
+ if ( !result.isOK() )
+ return result.getStatus();
+
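+ // Commit to the journal periodically and honor kill requests so a
+ // long repair makes durable progress and stays interruptible.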
+ getDur().commitIfNeeded();
+ killCurrentOp.checkForInterrupt(false);
+ }
+
+ {
+ Client::Context tempContext( ns, tempDatabase );
+ Status status = indexBlock.commit();
+ if ( !status.isOK() )
+ return status;
+ }
+
+ }
+
+ Client::Context tempContext( dbName, reservedPathString );
+ Database::closeDatabase( dbName, reservedPathString );
+ }
+
+ Client::Context ctx( dbName );
+ Database::closeDatabase(dbName, storageGlobalParams.dbpath);
+
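+ // Swap in the repaired files: move the originals aside as a backup, or
+ // delete them and recreate an empty directory for the recovered files.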
+ if ( backupOriginalFiles ) {
+ _renameForBackup( dbName, reservedPath );
+ }
+ else {
+ _deleteDataFiles( dbName );
+ MONGO_ASSERT_ON_EXCEPTION(
+ boost::filesystem::create_directory(Path(storageGlobalParams.dbpath) / dbName));
+ }
+
+ if ( repairFileDeleter.get() )
+ repairFileDeleter->success();
+
+ _replaceWithRecovered( dbName, reservedPathString.c_str() );
+
+ if ( !backupOriginalFiles )
+ MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );
+
+ return Status::OK();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/repair_database.h b/src/mongo/db/repair_database.h
new file mode 100644
index 00000000000..4fffe435558
--- /dev/null
+++ b/src/mongo/db/repair_database.h
@@ -0,0 +1,52 @@
+// repair_database.h
+
+/**
+* Copyright (C) 2014 MongoDB Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*
+* As a special exception, the copyright holders give permission to link the
+* code of portions of this program with the OpenSSL library under certain
+* conditions as described in each individual source file and distribute
+* linked combinations including the program with the OpenSSL library. You
+* must comply with the GNU Affero General Public License in all respects for
+* all of the code used other than as permitted herein. If you modify file(s)
+* with this exception, you may extend this exception to your version of the
+* file(s), but you are not obligated to do so. If you do not wish to do so,
+* delete this exception statement from your version. If you delete this
+* exception statement from all source files in the program, then also delete
+* it in the license file.
+*/
+
+#pragma once
+
+#include <string>
+
+#include "mongo/base/status.h"
+#include "mongo/platform/cstdint.h"
+
+namespace mongo {
+
+ // TODO: move
+ intmax_t dbSize( const std::string& database );
+
+ // TODO: move
+ void _deleteDataFiles(const std::string& database);
+
+ // Repairs the named database in place. The caller must hold the global
+ // write lock for the duration of the call.
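+ //
+ // A minimal usage sketch (illustrative only; assumes the global write
+ // lock is already held and that a database named "test" exists):
+ //
+ //     Status status = repairDatabase( "test",
+ //                                     false,   // preserveClonedFilesOnFailure
+ //                                     false ); // backupOriginalFiles
+ //     if ( !status.isOK() )
+ //         log() << "repair failed: " << status.toString();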
+ Status repairDatabase( std::string db,
+ bool preserveClonedFilesOnFailure = false,
+ bool backupOriginalFiles = false );
+
+} // namespace mongo
diff --git a/src/mongo/db/structure/collection_compact.cpp b/src/mongo/db/structure/collection_compact.cpp
index 0bf9633fe00..ca717cb0889 100644
--- a/src/mongo/db/structure/collection_compact.cpp
+++ b/src/mongo/db/structure/collection_compact.cpp
@@ -32,6 +32,7 @@
#include "mongo/base/counter.h"
#include "mongo/base/owned_pointer_map.h"
+#include "mongo/db/catalog/index_create.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands/server_status.h"
#include "mongo/db/curop.h"
@@ -75,7 +76,7 @@ namespace mongo {
};
void Collection::_compactExtent(const DiskLoc diskloc, int extentNumber,
- vector<IndexAccessMethod*>& indexesToInsertTo,
+ MultiIndexBlock& indexesToInsertTo,
const CompactOptions* compactOptions, CompactStats* stats ) {
log() << "compact begin extent #" << extentNumber
@@ -154,13 +155,7 @@ namespace mongo {
options.logIfError = false;
options.dupsAllowed = true; // in compact we should be doing no checking
- for ( size_t i = 0; i < indexesToInsertTo.size(); i++ ) {
- Status idxStatus = indexesToInsertTo[i]->insert( objOld,
- status.getValue(),
- options,
- NULL );
- uassertStatusOK( idxStatus );
- }
+ indexesToInsertTo.insert( objOld, status.getValue(), options );
}
if( L.isNull() ) {
@@ -289,37 +284,14 @@ namespace mongo {
getDur().commitIfNeeded();
- CompactStats stats;
+ killCurrentOp.checkForInterrupt(false);
- OwnedPointerVector<IndexCatalog::IndexBuildBlock> indexBuildBlocks;
- vector<IndexAccessMethod*> indexesToInsertTo;
- vector< std::pair<IndexAccessMethod*,IndexAccessMethod*> > bulkToCommit;
- for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
- killCurrentOp.checkForInterrupt(false);
- BSONObj info = indexSpecs[i];
- info = _compactAdjustIndexSpec( info );
- info = _indexCatalog.fixIndexSpec( info );
- auto_ptr<IndexCatalog::IndexBuildBlock> block( new IndexCatalog::IndexBuildBlock( this,info ) );
- Status status = block->init();
- if ( !status.isOK() )
- return StatusWith<CompactStats>(status);
-
- IndexAccessMethod* accessMethod = block->getEntry()->accessMethod();
- status = accessMethod->initializeAsEmpty();
- if ( !status.isOK() )
- return StatusWith<CompactStats>(status);
-
- IndexAccessMethod* bulk = accessMethod->initiateBulk();
- if ( bulk ) {
- indexesToInsertTo.push_back( bulk );
- bulkToCommit.push_back( std::pair<IndexAccessMethod*,IndexAccessMethod*>( accessMethod, bulk ) );
- }
- else {
- indexesToInsertTo.push_back( accessMethod );
- }
+ CompactStats stats;
- indexBuildBlocks.mutableVector().push_back( block.release() );
- }
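+ // A single MultiIndexBlock now manages the whole index-build lifecycle
+ // for compact: init( specs ), insert( doc, loc, options ) per record in
+ // _compactExtent, and commit() once all extents are copied.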
+ MultiIndexBlock multiIndexBlock( this );
+ status = multiIndexBlock.init( indexSpecs );
+ if ( !status.isOK() )
+ return StatusWith<CompactStats>( status );
// reset data size and record counts to 0 for this namespace
// as we're about to tally them up again for each new extent
@@ -331,7 +303,7 @@ namespace mongo {
int extentNumber = 0;
for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
- _compactExtent(*i, extentNumber++, indexesToInsertTo, compactOptions, &stats );
+ _compactExtent(*i, extentNumber++, multiIndexBlock, compactOptions, &stats );
pm.hit();
}
@@ -342,14 +314,9 @@ namespace mongo {
log() << "starting index commits";
- for ( size_t i = 0; i < bulkToCommit.size(); i++ ) {
- bulkToCommit[i].first->commitBulk( bulkToCommit[i].second, false, NULL );
- }
-
- for ( size_t i = 0; i < indexBuildBlocks.size(); i++ ) {
- IndexCatalog::IndexBuildBlock* block = indexBuildBlocks.mutableVector()[i];
- block->success();
- }
+ status = multiIndexBlock.commit();
+ if ( !status.isOK() )
+ return StatusWith<CompactStats>( status );
return StatusWith<CompactStats>( stats );
}
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 01e0fde9cee..e8881b8a972 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -51,7 +51,7 @@ namespace CountTests {
if ( _collection ) {
_database->dropCollection( ns() );
}
- _collection = _database->createCollection( ns(), false, NULL, true );
+ _collection = _database->createCollection( ns() );
addIndex( fromjson( "{\"a\":1}" ) );
}
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 3e2da8aa01a..706b2e482ea 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -372,8 +372,10 @@ namespace IndexUpdateTests {
// Recreate the collection as capped, without an _id index.
Database* db = _ctx.ctx().db();
db->dropCollection( _ns );
- const BSONObj collOptions = BSON( "size" << (10 * 1024) );
- Collection* coll = db->createCollection( _ns, true, &collOptions );
+ CollectionOptions options;
+ options.capped = true;
+ options.cappedSize = 10 * 1024;
+ Collection* coll = db->createCollection( _ns, options );
coll->getIndexCatalog()->dropAllIndexes( true );
// Insert some documents.
int32_t nDocs = 1000;
@@ -402,8 +404,10 @@ namespace IndexUpdateTests {
// Recreate the collection as capped, without an _id index.
Database* db = _ctx.ctx().db();
db->dropCollection( _ns );
- const BSONObj collOptions = BSON( "size" << (10 * 1024) );
- Collection* coll = db->createCollection( _ns, true, &collOptions );
+ CollectionOptions options;
+ options.capped = true;
+ options.cappedSize = 10 * 1024;
+ Collection* coll = db->createCollection( _ns, options );
coll->getIndexCatalog()->dropAllIndexes( true );
// Insert some documents.
int32_t nDocs = 1000;
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 516679c1102..5635f0ebd10 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -1429,6 +1429,7 @@ namespace NamespaceTests {
public:
void run() {
create();
+ ASSERT( nsd()->isCapped() );
ASSERT( !nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
DiskLoc loc = nsd()->alloc( collection(), ns(), 300 );
ASSERT_EQUALS( 300, loc.rec()->lengthWithHeaders() );
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index 85146e574bf..a7d1da383e0 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -39,7 +39,7 @@ namespace OplogStartTests {
Base() : _context(ns()) {
Collection* c = _context.db()->getCollection(ns());
if (!c) {
- c = _context.db()->createCollection(ns(), false, NULL, true);
+ c = _context.db()->createCollection(ns());
}
c->getIndexCatalog()->ensureHaveIdIndex();
}
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index f4ea0a1a4e7..ae2e37dd2eb 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -167,6 +167,31 @@ namespace PdfileTests {
}
};
+ class CollectionOptionsRoundTrip {
+ public:
+
+ void check( const CollectionOptions& options1 ) {
+ CollectionOptions options2;
+ options2.parse( options1.toBSON() );
+ ASSERT_EQUALS( options1.toBSON(), options2.toBSON() );
+ }
+
+ void run() {
+ CollectionOptions options;
+ check( options );
+
+ options.capped = true;
+ options.cappedSize = 10240;
+ options.cappedMaxDocs = 1111;
+ check( options );
+
+ options.setNoIdIndex();
+ options.flags = 5;
+ check( options );
+
+ }
+ };
+
class All : public Suite {
public:
All() : Suite( "pdfile" ) {}
@@ -176,6 +201,7 @@ namespace PdfileTests {
add< Insert::UpdateDate >();
add< Insert::ValidId >();
add< ExtentSizing >();
+ add< CollectionOptionsRoundTrip >();
}
} myall;
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 10f3807bc54..5137a7915f3 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -63,7 +63,7 @@ namespace QueryTests {
if ( _collection ) {
_database->dropCollection( ns() );
}
- _collection = _database->createCollection( ns(), false, NULL, true );
+ _collection = _database->createCollection( ns() );
addIndex( fromjson( "{\"a\":1}" ) );
}
~Base() {
@@ -160,8 +160,7 @@ namespace QueryTests {
_collection = NULL;
db->dropCollection( ns() );
}
- BSONObj options = BSON("autoIndexId" << 0 );
- _collection = db->createCollection( ns(), false, &options );
+ _collection = db->createCollection( ns(), CollectionOptions(), true, false );
ASSERT( _collection );
DBDirectClient cl;
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 181f91d35bd..9f720c1d0ca 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -66,7 +66,7 @@ namespace ReplTests {
Collection* c = _context.db()->getCollection( ns() );
if ( ! c ) {
- c = _context.db()->createCollection( ns(), false, NULL, true );
+ c = _context.db()->createCollection( ns() );
}
c->getIndexCatalog()->ensureHaveIdIndex();
}
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index fb9d4bd021d..c27a9a388da 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1642,10 +1642,7 @@ namespace mongo {
<< endl;
}
else {
- db->createCollection( ns,
- false /* capped */,
- NULL /* options */,
- true /* allocateDefaultSpace */ );
+ db->createCollection( ns );
}
}
}