author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2015-01-06 18:41:07 -0500
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2015-01-07 15:04:37 -0500
commit     47b931ab41098de23c967f0add30ef7b1a4bdda2 (patch)
tree       098d0389f260f9e8a81f937940fe06fcd48c496b /src/mongo/db
parent     67c550459e416a9ec7eabe35e38c1fed9aeccb8f (diff)
download   mongo-47b931ab41098de23c967f0add30ef7b1a4bdda2.tar.gz
SERVER-16711 Fix failing disk/diskfull.js suite
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/cloner.cpp                                           41
-rw-r--r--  src/mongo/db/dbcommands.cpp                                        3
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp          18
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp   86
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp           12

5 files changed, 95 insertions, 65 deletions
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 22a1101d115..f81806f01c9 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -464,12 +464,15 @@ namespace mongo {
}
}
+ // Gather the list of collections to clone
list<BSONObj> toClone;
- if ( clonedColls ) clonedColls->clear();
+ if (clonedColls) {
+ clonedColls->clear();
+ }
+
{
- /* todo: we can put these releases inside dbclient or a dbclient specialization.
- or just wait until we get rid of global lock anyway.
- */
+ // getCollectionInfos may make a remote call, which may block indefinitely, so release
+ // the global lock that we are entering with.
Lock::TempRelease tempRelease(txn->lockState());
list<BSONObj> raw = _conn->getCollectionInfos( opts.fromDB );
@@ -496,7 +499,7 @@ namespace mongo {
verify( !e.eoo() );
verify( e.type() == String );
- NamespaceString ns( opts.fromDB, e.valuestr() );
+ const NamespaceString ns(opts.fromDB, e.valuestr());
if( ns.isSystem() ) {
/* system.users and s.js is cloned -- but nothing else from system.
@@ -519,7 +522,10 @@ namespace mongo {
LOG(2) << "\t\t not ignoring collection " << ns;
}
- if ( clonedColls ) clonedColls->insert( ns.ns() );
+ if (clonedColls) {
+ clonedColls->insert(ns.ns());
+ }
+
toClone.push_back( collection.getOwned() );
}
}
@@ -531,27 +537,29 @@ namespace mongo {
const char* collectionName = collection["name"].valuestr();
BSONObj options = collection.getObjectField("options");
- NamespaceString from_name( opts.fromDB, collectionName );
- NamespaceString to_name( toDBName, collectionName );
+ const NamespaceString from_name(opts.fromDB, collectionName);
+ const NamespaceString to_name(toDBName, collectionName);
+
+ Database* db = dbHolder().openDb(txn, toDBName);
- Database* db;
{
WriteUnitOfWork wunit(txn);
- // Copy releases the lock, so we need to re-load the database. This should
- // probably throw if the database has changed in between, but for now preserve
- // the existing behaviour.
- db = dbHolder().openDb(txn, toDBName);
// we defer building id index for performance - building it in batch is much
// faster
- Status createStatus = userCreateNS( txn, db, to_name.ns(), options,
- opts.logForRepl, false );
+ Status createStatus = userCreateNS(txn,
+ db,
+ to_name.ns(),
+ options,
+ opts.logForRepl,
+ false);
if ( !createStatus.isOK() ) {
errmsg = str::stream() << "failed to create collection \""
<< to_name.ns() << "\": "
<< createStatus.reason();
return false;
}
+
wunit.commit();
}
@@ -571,6 +579,9 @@ namespace mongo {
opts.mayBeInterrupted,
q);
+ // Copy releases the lock, so we need to re-load the database. This should
+ // probably throw if the database has changed in between, but for now preserve
+ // the existing behaviour.
db = dbHolder().get(txn, toDBName);
uassert(18645,
str::stream() << "database " << toDBName << " dropped during clone",
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 0453a3ca4c8..a71fae2e5dc 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -581,9 +581,10 @@ namespace mongo {
ScopedTransaction transaction(txn, MODE_IX);
Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
- WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns);
+ WriteUnitOfWork wunit(txn);
+
// Create collection.
status = userCreateNS(txn, ctx.db(), ns.c_str(), options, !fromRepl);
if ( !status.isOK() ) {
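
The dbcommands.cpp reordering is small but deliberate: Client::Context can open
the database, and after this commit opening a database commits the journal
directly (see the getDur().commitNow call added to namespace_index.cpp below),
which must not happen while the caller's WriteUnitOfWork is active. Hence the
context is constructed first. A sketch of the corrected ordering, with all
identifiers taken from the diff; the reading of *why* is an inference from the
rest of this commit, not something the commit message states:

    ScopedTransaction transaction(txn, MODE_IX);
    Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);

    // May open (and now fully materialize) the database, including its
    // own commits -- so this must precede the unit of work below.
    Client::Context ctx(txn, ns);

    WriteUnitOfWork wunit(txn);
    Status status = userCreateNS(txn, ctx.db(), ns.c_str(), options, !fromRepl);
    if (status.isOK()) {
        wunit.commit();  // without this, destruction rolls the create back
    }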
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
index cf5011e6b2b..c4021faa522 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
@@ -150,7 +150,7 @@ namespace mongo {
MONGO_ASSERT_ON_EXCEPTION_WITH_MSG( boost::filesystem::create_directory( dir ), "create dir for db " );
}
- NOINLINE_DECL void NamespaceIndex::init(OperationContext* txn) {
+ void NamespaceIndex::init(OperationContext* txn) {
invariant(!_ht.get());
unsigned long long len = 0;
@@ -223,12 +223,16 @@ namespace mongo {
}
if (_f.create(pathString, l, true)) {
- // The writes done in this function must not be rolled back. If the containing
- // UnitOfWork rolls back it should roll back to the state *after* these writes. This
- // will leave the file empty, but available for future use. That is why we go
- // directly to the global dur dirty list rather than going through the
- // OperationContext.
- getDur().createdFile(pathString, l); // always a new file
+ // The writes done in this function must not be rolled back. This will leave the
+ // file empty, but available for future use. That is why we go directly to the
+ // global dur dirty list rather than going through the OperationContext.
+ getDur().createdFile(pathString, l);
+
+ // Commit the journal and all changes to disk so that even if exceptions occur
+ // during subsequent initialization, we won't have uncommitted changes during file
+ // close.
+ getDur().commitNow(txn);
+
len = l;
invariant(len == mmapv1GlobalOptions.lenForNewNsFiles);
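
The two-step sequence added here is the heart of the diskfull.js fix for the
.ns file: the creation is journaled outside the OperationContext so it survives
a rollback, and the journal is then flushed immediately so a later failure (for
example, disk full during hashtable setup) cannot leave uncommitted writes
behind at file close. As a standalone sketch, using only the calls that appear
in the diff (pathString and l come from the surrounding function):

    if (_f.create(pathString, l, true)) {
        // Journal the creation directly on the global dur dirty list,
        // bypassing the OperationContext: a rolled-back WriteUnitOfWork
        // must still leave the empty-but-reusable .ns file behind.
        getDur().createdFile(pathString, l);  // always a new file

        // Flush journal and data files now, so any exception thrown by
        // subsequent initialization finds nothing uncommitted.
        getDur().commitNow(txn);
    }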
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index 5b3c35fb59b..529c46b37c6 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -168,32 +168,20 @@ namespace {
invariant(txn->lockState()->isDbLockedForMode(name, MODE_X));
try {
- _init( txn );
-
- std::list<std::string> namespaces;
- _namespaceIndex.getCollectionNamespaces( &namespaces );
- for ( std::list<std::string>::const_iterator i = namespaces.begin();
- i != namespaces.end(); // we add to the list in the loop so can't cache end().
- ++i ) {
-
- const std::string& ns = *i;
- Entry*& entry = _collections[ns];
-
- // Entry was already loaded for system.indexes and system.namespaces in _init. That
- // is ok, since they can't have indexes on them anyway.
- if (!entry) {
- entry = new Entry();
- _insertInCache(txn, ns, entry);
-
- // Add the indexes on this namespace to the list of namespaces to load.
- std::vector<std::string> indexNames;
- entry->catalogEntry->getAllIndexes(txn, &indexNames);
- for (size_t i = 0; i < indexNames.size(); i++) {
- namespaces.push_back(
- IndexDescriptor::makeIndexNamespace(ns, indexNames[i]));
- }
- }
+ // First init the .ns file. If this fails, we may leak the .ns file, but this is OK
+ // because subsequent openDB will go through this code path again.
+ _namespaceIndex.init(txn);
+
+ // Initialize the extent manager. This will create the first data file (.0) if needed
+ // and if this fails we would leak the .ns file above. Leaking the .ns or .0 file is
+ // acceptable, because subsequent openDB calls will exercise the code path again.
+ Status s = _extentManager.init(txn);
+ if (!s.isOK()) {
+ msgasserted(16966, str::stream() << "_extentManager.init failed: " << s.toString());
}
+
+ // This is the actual loading of the on-disk structures into cache.
+ _init( txn );
}
catch (const DBException& dbe) {
warning() << "database " << path << " " << name
@@ -515,15 +503,6 @@ namespace {
}
void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
- // First init the .ns file
- _namespaceIndex.init(txn);
-
- // Initialize the extent manager
- Status s = _extentManager.init(txn);
- if (!s.isOK()) {
- msgasserted(16966, str::stream() << "_extentManager.init failed: " << s.toString());
- }
-
WriteUnitOfWork wunit(txn);
// Upgrade freelist
@@ -584,10 +563,6 @@ namespace {
md,
&_extentManager,
false));
-
- if (nsEntry->recordStore->storageSize(txn) == 0) {
- nsEntry->recordStore->increaseStorageSize(txn, _extentManager.initialSize(128), false);
- }
}
if (!indexEntry) {
@@ -603,10 +578,6 @@ namespace {
md,
&_extentManager,
true));
-
- if (indexEntry->recordStore->storageSize(txn) == 0) {
- indexEntry->recordStore->increaseStorageSize(txn, _extentManager.initialSize(128), false);
- }
}
if (isSystemIndexesGoingToBeNew) {
@@ -630,6 +601,37 @@ namespace {
}
wunit.commit();
+
+ // Now put everything in the cache of namespaces. None of the operations below do any
+ // transactional operations.
+ std::list<std::string> namespaces;
+ _namespaceIndex.getCollectionNamespaces(&namespaces);
+
+ for (std::list<std::string>::const_iterator i = namespaces.begin();
+ i != namespaces.end(); // we add to the list in the loop so we can't cache end().
+ i++) {
+
+ const std::string& ns = *i;
+ Entry*& entry = _collections[ns];
+
+ // The two cases where entry is not null are for system.indexes and system.namespaces,
+ // which we manually instantiated above. It is OK to skip these two collections,
+ // because they don't have indexes on them anyway.
+ if (entry) {
+ continue;
+ }
+
+ entry = new Entry();
+ _insertInCache(txn, ns, entry);
+
+ // Add the indexes on this namespace to the list of namespaces to load.
+ std::vector<std::string> indexNames;
+ entry->catalogEntry->getAllIndexes(txn, &indexNames);
+
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ namespaces.push_back(IndexDescriptor::makeIndexNamespace(ns, indexNames[i]));
+ }
+ }
}
Status MMAPV1DatabaseCatalogEntry::createCollection( OperationContext* txn,
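
One detail of the relocated cache-loading loop is worth a remark: it appends to
namespaces while iterating it, which is safe because std::list::push_back never
invalidates existing iterators, so index namespaces discovered mid-walk are
visited on a later pass of the same loop. A self-contained illustration of the
pattern (the collection and index names are invented for the example):

    #include <iostream>
    #include <list>
    #include <string>

    int main() {
        std::list<std::string> namespaces{"test.foo"};

        for (std::list<std::string>::const_iterator i = namespaces.begin();
             i != namespaces.end();
             ++i) {
            std::cout << "loading " << *i << '\n';

            if (*i == "test.foo") {
                // Pretend getAllIndexes() reported one index here; the
                // appended entry is reached by this same loop later on.
                namespaces.push_back("test.foo.$_id_");
            }
        }
    }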
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 53cd11507c6..7f1d873b369 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -174,6 +174,18 @@ namespace mongo {
_files.push_back( df.release() );
}
+ // If this is a new database being created, instantiate the first file and one extent so
+ // we can have a coherent database.
+ if (_files.empty()) {
+ WriteUnitOfWork wuow(txn);
+ _createExtent(txn, initialSize(128), false);
+ wuow.commit();
+
+ // Commit the journal and all changes to disk so that even if exceptions occur during
+ // subsequent initialization, we won't have uncommitted changes during file close.
+ getDur().commitNow(txn);
+ }
+
return Status::OK();
}
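
Finally, the extent manager change gives a brand-new database its first data
file and extent at init time, mirroring the .ns handling above: transactional
work in its own WriteUnitOfWork, then an immediate journal flush. A sketch of
why the unit of work matters, using only calls from the diff; the rollback
behaviour described in the comments is an assumption about the storage API of
this era of the code:

    if (_files.empty()) {
        WriteUnitOfWork wuow(txn);
        _createExtent(txn, initialSize(128), false);
        wuow.commit();  // omit this and the extent is rolled back on scope
                        // exit, leaving _files empty for a clean retry

        // As with the .ns file: make the extent durable before anything
        // later in database open has a chance to throw.
        getDur().commitNow(txn);
    }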