author    Geert Bosch <geert@mongodb.com>    2014-09-29 17:53:20 -0400
committer Geert Bosch <geert@mongodb.com>    2014-09-29 17:54:36 -0400
commit    8e83e72512fcb8eb8f06987927766c0b77cea23e (patch)
tree      21167749db25a7124a528157f20d1f7a3be4beee
parent    57af98451c500c5d8112cfc5e75917a0e561069f (diff)
download  mongo-8e83e72512fcb8eb8f06987927766c0b77cea23e.tar.gz
BF-532: Revert "SERVER-14668: Replace uses of DBWrite lock with DBLock"
This reverts commit ae333bc94a7d89d3220dcae9579fcaf68aa2e290.
This reverts commit 962f959a09b63aa0482d7e0c9bad89363d1e1194.
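The revert below is mechanical but touches every call site that SERVER-14668 had migrated from the Lock::DBWrite convenience wrapper to the generic Lock::DBLock with an explicit exclusive mode. As a hedged sketch of the two idioms being swapped (using only names that appear in this diff and assuming the in-tree d_concurrency.h; not compilable standalone):

    // Post-SERVER-14668 style that this commit reverts: the caller resolves the
    // database name and spells out the exclusive mode.
    void lockDatabaseExplicitly(OperationContext* txn, const std::string& ns) {
        Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
        // ... database writes happen while lk is in scope ...
    }

    // Style restored by this revert: DBWrite accepts a database name or a full
    // namespace and always takes the database lock in MODE_X.
    void lockDatabaseViaDBWrite(OperationContext* txn, const std::string& ns) {
        Lock::DBWrite lk(txn->lockState(), ns);
        // ... database writes happen while lk is in scope ...
    }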
-rw-r--r--  src/mongo/db/client.cpp | 2
-rw-r--r--  src/mongo/db/client.h | 2
-rw-r--r--  src/mongo/db/cloner.cpp | 12
-rw-r--r--  src/mongo/db/commands/apply_ops.cpp | 26
-rw-r--r--  src/mongo/db/commands/clone.cpp | 2
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp | 2
-rw-r--r--  src/mongo/db/commands/compact.cpp | 4
-rw-r--r--  src/mongo/db/commands/copydb.cpp | 15
-rw-r--r--  src/mongo/db/commands/cpuprofile.cpp | 4
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp | 2
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp | 4
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp | 6
-rw-r--r--  src/mongo/db/commands/mr.cpp | 16
-rw-r--r--  src/mongo/db/commands/test_commands.cpp | 2
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp | 12
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.cpp | 5
-rw-r--r--  src/mongo/db/concurrency/d_concurrency.h | 19
-rw-r--r--  src/mongo/db/concurrency/d_concurrency_test.cpp | 4
-rw-r--r--  src/mongo/db/dbcommands.cpp | 10
-rw-r--r--  src/mongo/db/exec/stagedebug_cmd.cpp | 2
-rw-r--r--  src/mongo/db/index_rebuilder.cpp | 2
-rw-r--r--  src/mongo/db/instance.cpp | 6
-rw-r--r--  src/mongo/db/introspect.cpp | 9
-rw-r--r--  src/mongo/db/repl/minvalid.cpp | 6
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 6
-rw-r--r--  src/mongo/db/repl/repl_coordinator_external_state_impl.cpp | 6
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp | 2
-rw-r--r--  src/mongo/db/repl/sync_tail.cpp | 6
-rw-r--r--  src/mongo/dbtests/counttests.cpp | 5
-rw-r--r--  src/mongo/dbtests/dbhelper_tests.cpp | 2
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp | 4
-rw-r--r--  src/mongo/dbtests/querytests.cpp | 8
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp | 8
-rw-r--r--  src/mongo/s/d_merge.cpp | 2
-rw-r--r--  src/mongo/s/d_migrate.cpp | 10
-rw-r--r--  src/mongo/s/d_split.cpp | 10
-rw-r--r--  src/mongo/s/d_state.cpp | 4
-rw-r--r--  src/mongo/s/metadata_loader.h | 4
38 files changed, 127 insertions, 124 deletions
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 812e062ede9..17901be98de 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -238,7 +238,7 @@ namespace mongo {
Client::WriteContext::WriteContext(
OperationContext* opCtx, const std::string& ns, bool doVersion)
- : _lk(opCtx->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X),
+ : _lk(opCtx->lockState(), ns),
_wunit(opCtx),
_c(opCtx, ns, doVersion) {
}
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index 265b8439037..ef78f899154 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -204,7 +204,7 @@ namespace mongo {
Context& ctx() { return _c; }
private:
- Lock::DBLock _lk;
+ Lock::DBWrite _lk;
WriteUnitOfWork _wunit;
Context _c;
};
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 67c76b071e9..1686cfe2c10 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -308,15 +308,15 @@ namespace mongo {
bool logForRepl) {
const NamespaceString nss(ns);
- const string dbname = nss.db().toString();
+ Lock::DBWrite dbWrite(txn->lockState(), nss.db());
- Lock::DBLock dbWrite(txn->lockState(), dbname, newlm::MODE_X);
+ const string dbName = nss.db().toString();
bool unused;
- Database* db = dbHolder().getOrCreate(txn, dbname, unused);
+ Database* db = dbHolder().getOrCreate(txn, dbName, unused);
// config
- string temp = dbname + ".system.namespaces";
+ string temp = dbName + ".system.namespaces";
BSONObj config = _conn->findOne(temp , BSON("name" << ns));
if (config["options"].isABSONObj()) {
WriteUnitOfWork wunit(txn);
@@ -329,7 +329,7 @@ namespace mongo {
}
// main data
- copy(txn, dbname,
+ copy(txn, dbName,
nss, nss,
logForRepl, false, true, mayYield, mayBeInterrupted,
Query(query).snapshot());
@@ -340,7 +340,7 @@ namespace mongo {
}
// indexes
- copyIndexes(txn, dbname,
+ copyIndexes(txn, dbName,
NamespaceString(ns), NamespaceString(ns),
logForRepl, false, true, mayYield,
mayBeInterrupted);
diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp
index cc760412120..c0a1f398e56 100644
--- a/src/mongo/db/commands/apply_ops.cpp
+++ b/src/mongo/db/commands/apply_ops.cpp
@@ -126,26 +126,26 @@ namespace mongo {
string ns = temp["ns"].String();
- // Run operations under a nested lock as a hack to prevent yielding.
+ // Run operations under a nested lock as a hack to prevent them from yielding.
//
- // The list of operations is supposed to be applied atomically; yielding
- // would break atomicity by allowing an interruption or a shutdown to occur
- // after only some operations are applied. We are already locked globally
- // at this point, so taking a DBLock on the namespace creates a nested lock,
- // and yields are disallowed for operations that hold a nested lock.
+ // The list of operations is supposed to be applied atomically; yielding would break
+ // atomicity by allowing an interruption or a shutdown to occur after only some
+ // operations are applied. We are already locked globally at this point, so taking
+ // a DBWrite on the namespace creates a nested lock, and yields are disallowed for
+ // operations that hold a nested lock.
//
- // We do not have a wrapping WriteUnitOfWork so it is possible for a journal
- // commit to happen with a subset of ops applied.
+ // We do not have a wrapping WriteUnitOfWork so it is possible for a journal commit
+ // to happen with a subset of ops applied.
// TODO figure out what to do about this.
- Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns);
invariant(txn->lockState()->isRecursive());
Client::Context ctx(txn, ns);
bool failed = repl::applyOperation_inlock(txn,
- ctx.db(),
- temp,
- false,
- alwaysUpsert);
+ ctx.db(),
+ temp,
+ false,
+ alwaysUpsert);
ab.append(!failed);
if ( failed )
errors++;
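For clarity, a hedged sketch of the nesting the restored apply_ops comment describes: applyOps already holds the global lock, so the per-namespace DBWrite taken for each operation is recursive, and that recursion is what disables yielding (identifiers taken from the hunk above; assumes the in-tree headers, not a standalone program):

    // Sketch only -- not the real applyOps loop.
    void applyOneOpUnderGlobalLock(OperationContext* txn, const std::string& ns) {
        // The caller is assumed to already hold Lock::GlobalWrite, so this
        // DBWrite nests inside it ...
        Lock::DBWrite lk(txn->lockState(), ns);
        // ... and the lock state reports recursion, which keeps the applied
        // operation from yielding part-way through the batch.
        invariant(txn->lockState()->isRecursive());
        Client::Context ctx(txn, ns);
        // repl::applyOperation_inlock(...) would run here with yields disabled.
    }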
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index 0b73001e559..a25deba9e32 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -115,7 +115,7 @@ namespace mongo {
set<string> clonedColls;
- Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Cloner cloner;
bool rval = cloner.go(txn, dbname, from, opts, &clonedColls, errmsg);
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 24439e82b9b..2a4f60cee31 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -161,7 +161,7 @@ namespace mongo {
return false;
}
- Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(txn, dbname);
Status status = cloneCollectionAsCapped( txn, ctx.db(), from, to, size, temp, true );
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index 8ffac0bff38..5ed0ac16aca 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -97,7 +97,7 @@ namespace mongo {
return false;
}
- NamespaceString ns(db, coll);
+ NamespaceString ns(db,coll);
if ( !ns.isNormal() ) {
errmsg = "bad namespace name";
return false;
@@ -144,7 +144,7 @@ namespace mongo {
compactOptions.validateDocuments = cmdObj["validate"].trueValue();
- Lock::DBLock lk(txn->lockState(), db, newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns.ns());
BackgroundOperation::assertNoBgOpInProgForNs(ns.ns());
Client::Context ctx(txn, ns);
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index 58f733e02b0..208d792c59e 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -182,14 +182,15 @@ namespace mongo {
cloner.setConnection(conn);
}
- if (fromSelf) {
- // SERVER-4328 todo lock just the two db's not everything for the fromself case
- Lock::GlobalWrite lk(txn->lockState());
- return cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg);
- }
- Lock::DBLock lk (txn->lockState(), todb, newlm::MODE_X);
- return cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg);
+ // SERVER-4328 todo lock just the two db's not everything for the fromself case
+ scoped_ptr<Lock::ScopedLock> lk( fromSelf ?
+ static_cast<Lock::ScopedLock*>(new Lock::GlobalWrite(txn->lockState())) :
+ static_cast<Lock::ScopedLock*>(new Lock::DBWrite(txn->lockState(), todb)));
+ if (!cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg )) {
+ return false;
+ }
+ return true;
}
} cmdCopyDB;
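The restored copydb body above also brings back the conditional-scope idiom: one scoped_ptr to the common Lock::ScopedLock base holds either a global write lock (the fromSelf case flagged by the SERVER-4328 TODO) or a single-database DBWrite. A hedged sketch of that pattern, with names from the hunk and boost::scoped_ptr assumed for the smart pointer:

    #include <boost/scoped_ptr.hpp>

    void runClonerWithAppropriateLock(OperationContext* txn,
                                      const std::string& todb,
                                      bool fromSelf) {
        // Take the widest lock only when copying a database from this same
        // server; otherwise lock just the target database exclusively.
        boost::scoped_ptr<Lock::ScopedLock> lk(fromSelf
            ? static_cast<Lock::ScopedLock*>(new Lock::GlobalWrite(txn->lockState()))
            : static_cast<Lock::ScopedLock*>(new Lock::DBWrite(txn->lockState(), todb)));

        // ... cloner.go(...) runs while lk keeps the chosen scope locked ...
    }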
diff --git a/src/mongo/db/commands/cpuprofile.cpp b/src/mongo/db/commands/cpuprofile.cpp
index b1c81ca3584..884e43d0c29 100644
--- a/src/mongo/db/commands/cpuprofile.cpp
+++ b/src/mongo/db/commands/cpuprofile.cpp
@@ -133,7 +133,7 @@ namespace mongo {
std::string &errmsg,
BSONObjBuilder &result,
bool fromRepl ) {
- Lock::DBLock dbXLock(txn->lockState(), db, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), db);
// The lock here is just to prevent concurrency, nothing will write.
Client::Context ctx(txn, db);
@@ -152,7 +152,7 @@ namespace mongo {
std::string &errmsg,
BSONObjBuilder &result,
bool fromRepl ) {
- Lock::DBLock dbXLock(txn->lockState(), db, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), db);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, db);
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 2276c15c67d..58791dea990 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -134,7 +134,7 @@ namespace mongo {
// now we know we have to create index(es)
// Note: createIndexes command does not currently respect shard versioning.
- Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns.ns());
Client::Context ctx(txn, ns.ns(), false /* doVersion */ );
Database* db = ctx.db();
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 122ed26cc9a..ba93d95bc54 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -103,7 +103,7 @@ namespace mongo {
CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") { }
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& anObjBuilder, bool fromRepl) {
- Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
WriteUnitOfWork wunit(txn);
bool ok = wrappedRun(txn, dbname, jsobj, errmsg, anObjBuilder);
if (!ok) {
@@ -239,7 +239,7 @@ namespace mongo {
LOG(0) << "CMD: reIndex " << toDeleteNs << endl;
- Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(txn, toDeleteNs);
Collection* collection = ctx.db()->getCollection( txn, toDeleteNs );
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 0e05feb5a71..4477e62bec0 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -97,7 +97,7 @@ namespace mongo {
return false;
}
- Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(txn, ns);
return runNoDirectClient( txn, ns ,
@@ -137,7 +137,7 @@ namespace mongo {
BSONObjBuilder& result,
string& errmsg) {
- Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns);
WriteUnitOfWork wunit(txn);
Client::Context cx(txn, ns);
@@ -335,7 +335,7 @@ namespace mongo {
}
}
- Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 81ed57a285d..e58e001a221 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -332,9 +332,7 @@ namespace mongo {
if (_useIncremental) {
// We don't want to log the deletion of incLong as it isn't replicated. While
// harmless, this would lead to a scary looking warning on the secondaries.
- Lock::DBLock lk(_txn->lockState(),
- nsToDatabaseSubstring(_config.incLong),
- newlm::MODE_X);
+ Lock::DBWrite lk(_txn->lockState(), _config.incLong);
if (Database* db = dbHolder().get(_txn, _config.incLong)) {
WriteUnitOfWork wunit(_txn);
db->dropCollection(_txn, _config.incLong);
@@ -593,11 +591,9 @@ namespace mongo {
op->setMessage("m/r: merge post processing",
"M/R Merge Post Processing Progress",
_safeCount(_db, _config.tempNamespace, BSONObj()));
- auto_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace , BSONObj());
- while (cursor->more()) {
- Lock::DBLock lock(_txn->lockState(),
- nsToDatabaseSubstring(_config.outputOptions.finalNamespace),
- newlm::MODE_X);
+ auto_ptr<DBClientCursor> cursor = _db.query( _config.tempNamespace , BSONObj() );
+ while ( cursor->more() ) {
+ Lock::DBWrite lock(_txn->lockState(), _config.outputOptions.finalNamespace);
WriteUnitOfWork wunit(_txn);
BSONObj o = cursor->nextSafe();
Helpers::upsert( _txn, _config.outputOptions.finalNamespace , o );
@@ -1114,9 +1110,7 @@ namespace mongo {
if ( ! _onDisk )
return;
- Lock::DBLock kl(_txn->lockState(),
- nsToDatabaseSubstring(_config.incLong),
- newlm::MODE_X);
+ Lock::DBWrite kl(_txn->lockState(), _config.incLong);
WriteUnitOfWork wunit(_txn);
for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); i++ ) {
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 204be07b75b..316d763babf 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -64,7 +64,7 @@ namespace mongo {
string ns = dbname + "." + coll;
BSONObj obj = cmdObj[ "obj" ].embeddedObjectUserCheck();
- Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns );
Database* db = ctx.db();
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index f5e372359b0..8b21578ce89 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -696,7 +696,7 @@ namespace mongo {
/**
* Gets the lock-holding object. Only valid if hasLock().
*/
- Lock::DBLock& getLock() { return *_writeLock; }
+ Lock::DBWrite& getLock() { return *_writeLock; }
/**
* Gets the target collection for the batch operation. Value is undefined
@@ -721,7 +721,7 @@ namespace mongo {
bool _lockAndCheckImpl(WriteOpResult* result);
// Guard object for the write lock on the target database.
- scoped_ptr<Lock::DBLock> _writeLock;
+ scoped_ptr<Lock::DBWrite> _writeLock;
// Context object on the target database. Must appear after writeLock, so that it is
// destroyed in proper order.
@@ -919,9 +919,7 @@ namespace mongo {
}
invariant(!_context.get());
- _writeLock.reset(new Lock::DBLock(txn->lockState(),
- nsToDatabase(request->getNS()),
- newlm::MODE_X));
+ _writeLock.reset(new Lock::DBWrite(txn->lockState(), request->getNS()));
if (!checkIsMasterForDatabase(request->getNS(), result)) {
return false;
}
@@ -1126,7 +1124,7 @@ namespace mongo {
}
///////////////////////////////////////////
- Lock::DBLock writeLock(txn->lockState(), nsString.db(), newlm::MODE_X);
+ Lock::DBWrite writeLock(txn->lockState(), nsString.ns());
///////////////////////////////////////////
if (!checkShardVersion(txn, &shardingState, *updateItem.getRequest(), result))
@@ -1181,7 +1179,7 @@ namespace mongo {
}
///////////////////////////////////////////
- Lock::DBLock writeLock(txn->lockState(), nss.db(), newlm::MODE_X);
+ Lock::DBWrite writeLock(txn->lockState(), nss.ns());
///////////////////////////////////////////
// Check version once we're locked
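One detail worth noting from the batch_executor change above: the surviving comment still requires _context to be declared after _writeLock so the two are destroyed in the right order. A hedged sketch of that member-ordering rule (the class name and the Context member type are placeholders chosen for illustration):

    // C++ destroys members in reverse declaration order, so the Context is
    // torn down while the database write lock is still held.
    class WriteBatchState {                            // hypothetical name
        boost::scoped_ptr<Lock::DBWrite>   _writeLock; // declared first -> destroyed last
        boost::scoped_ptr<Client::Context> _context;   // declared after -> destroyed first
    };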
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 6701159a420..15e108a53f2 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -313,7 +313,7 @@ namespace {
if (supportsDocLocking()) {
_lockState->lock(_id, mode);
}
- else {
+ else if (isRead) {
_lockState->lock(_id, isRead ? newlm::MODE_S : newlm::MODE_X);
}
}
@@ -322,6 +322,9 @@ namespace {
_lockState->unlock(_id);
}
+ Lock::DBWrite::DBWrite(Locker* lockState, const StringData& dbOrNs) :
+ DBLock(lockState, nsToDatabaseSubstring(dbOrNs), newlm::MODE_X) { }
+
Lock::DBRead::DBRead(Locker* lockState, const StringData& dbOrNs) :
DBLock(lockState, nsToDatabaseSubstring(dbOrNs), newlm::MODE_S) { }
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index a55eaa077f3..d4f11bb28d4 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -196,10 +196,9 @@ namespace mongo {
* MODE_S: shared read access to the collection, blocking any writers
* MODE_X: exclusive access to the collection, blocking all other readers and writers
*
- * An appropriate DBLock must already be held before locking a collection: it is an error,
- * checked with a dassert(), to not have a suitable database lock before locking the
- * collection. For storage engines that do not support document-level locking, MODE_IS
- * will be upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
+ * An appropriate DBLock must already be held before locking a collection.
+ * For storage engines that do not support document-level locking, MODE_IS will be
+ * upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
*/
class CollectionLock : boost::noncopyable {
public:
@@ -211,6 +210,18 @@ namespace mongo {
};
/**
+ * Exclusive database lock -- DEPRECATED, please transition to DBLock and collection locks
+ *
+ * Allows exclusive write access to the given database, blocking any other access.
+ * Allows further (recursive) acquisition of database locks for this database in any mode.
+ * Also acquires the global lock in intent-exclusive (IX) mode.
+ */
+ class DBWrite : public DBLock {
+ public:
+ DBWrite(Locker* lockState, const StringData& dbOrNs);
+ };
+
+ /**
* Shared database lock -- DEPRECATED, please transition to DBLock and collection locks
*
* Allows concurrent read access to the given database, blocking any writers.
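Combined with the constructor re-added in d_concurrency.cpp above, the restored DBWrite is a thin wrapper that resolves a database name or full namespace down to the database and locks it in MODE_X; the unit test that follows asserts exactly that for a bare database name. A hedged usage sketch showing the full-namespace case:

    // Sketch: a full namespace argument locks only its database part, in MODE_X.
    void dbWriteLocksDatabasePart() {
        LockState ls;
        Lock::DBWrite lk(&ls, "db.some.collection");  // same effect as DBLock(&ls, "db", newlm::MODE_X)

        const newlm::ResourceId resIdDb(newlm::RESOURCE_DATABASE, std::string("db"));
        invariant(ls.getLockMode(resIdDb) == newlm::MODE_X);
    }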
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index ddc59b17bd7..3391eab52a7 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -133,10 +133,10 @@ namespace mongo {
ASSERT(ls.getLockMode(resIdDb) == newlm::MODE_S);
}
- TEST(DConcurrency, DBLockTakesX) {
+ TEST(DConcurrency, DBWriteTakesX) {
LockState ls;
- Lock::DBLock dbWrite(&ls, "db", newlm::MODE_X);
+ Lock::DBWrite dbWrite(&ls, "db");
const newlm::ResourceId resIdDb(newlm::RESOURCE_DATABASE, string("db"));
ASSERT(ls.getLockMode(resIdDb) == newlm::MODE_X);
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 793cb492604..4b7e698f1ed 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -349,7 +349,7 @@ namespace mongo {
// Needs to be locked exclusively, because creates the system.profile collection
// in the local database.
//
- Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, dbname);
@@ -405,7 +405,7 @@ namespace mongo {
// This doesn't look like it requires exclusive DB lock, because it uses its own diag
// locking, but originally the lock was set to be WRITE, so preserving the behaviour.
//
- Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(txn, dbname);
int was = _diaglog.setLevel( cmdObj.firstElement().numberInt() );
@@ -461,7 +461,7 @@ namespace mongo {
return false;
}
- Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, nsToDrop);
Database* db = ctx.db();
@@ -561,7 +561,7 @@ namespace mongo {
!options["capped"].trueValue() || options["size"].isNumber() ||
options.hasField("$nExtents"));
- Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns);
@@ -987,7 +987,7 @@ namespace mongo {
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
const string ns = dbname + "." + jsobj.firstElement().valuestr();
- Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
WriteUnitOfWork wunit(txn);
Client::Context ctx(txn, ns );
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 9a86fe82da8..0f804e890ee 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -121,7 +121,7 @@ namespace mongo {
// TODO A write lock is currently taken here to accommodate stages that perform writes
// (e.g. DeleteStage). This should be changed to use a read lock for read-only
// execution trees.
- Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), dbname);
Client::Context ctx(txn, dbname);
// Make sure the collection is valid.
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 8dd345de946..cbde00dbf04 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -64,7 +64,7 @@ namespace {
// This write lock is held throughout the index building process
// for this namespace.
- Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns);
Client::Context ctx(txn, ns);
Collection* collection = ctx.db()->getCollection(txn, ns);
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 2d8b189edc3..dd37aaf5b1e 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -621,7 +621,7 @@ namespace mongo {
UpdateExecutor executor(&request, &op.debug());
uassertStatusOK(executor.prepare());
- Lock::DBLock lk(txn->lockState(), ns.db(), newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns.ns());
Client::Context ctx(txn, ns );
UpdateResult res = executor.execute(ctx.db());
@@ -655,7 +655,7 @@ namespace mongo {
DeleteExecutor executor(&request);
uassertStatusOK(executor.prepare());
- Lock::DBLock lk(txn->lockState(), ns.db(), newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns.ns());
Client::Context ctx(txn, ns);
long long n = executor.execute(ctx.db());
@@ -914,7 +914,7 @@ namespace mongo {
uassertStatusOK(status);
}
- Lock::DBLock lk(txn->lockState(), nsString.db(), newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns);
// CONCURRENCY TODO: is being read locked in big log sufficient here?
// writelock is used to synchronize stepdowns w/ writes
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index c84a1caafa0..c2f11b34e6d 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -135,11 +135,10 @@ namespace {
BufBuilder profileBufBuilder(1024);
try {
- // NOTE: It's kind of weird that we lock the op's namespace, but have to for now
- // since we're sometimes inside the lock already
- const string dbname(nsToDatabase(currentOp.getNS()));
- Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
- if (dbHolder().get(txn, dbname) != NULL) {
+ // NOTE: It's kind of weird that we lock the op's namespace, but have to for now since
+ // we're sometimes inside the lock already
+ Lock::DBWrite lk(txn->lockState(), currentOp.getNS() );
+ if (dbHolder().get(txn, nsToDatabase(currentOp.getNS())) != NULL) {
// We are ok with the profiling happening in a different WUOW from the actual op.
WriteUnitOfWork wunit(txn);
Client::Context cx(txn, currentOp.getNS(), false);
diff --git a/src/mongo/db/repl/minvalid.cpp b/src/mongo/db/repl/minvalid.cpp
index 30f1d17c18d..f2b0f4c189e 100644
--- a/src/mongo/db/repl/minvalid.cpp
+++ b/src/mongo/db/repl/minvalid.cpp
@@ -49,14 +49,14 @@ namespace {
} // namespace
void clearInitialSyncFlag(OperationContext* txn) {
- Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), "local");
WriteUnitOfWork wunit(txn);
Helpers::putSingleton(txn, minvalidNS, BSON("$unset" << initialSyncFlag));
wunit.commit();
}
void setInitialSyncFlag(OperationContext* txn) {
- Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), "local");
WriteUnitOfWork wunit(txn);
Helpers::putSingleton(txn, minvalidNS, BSON("$set" << initialSyncFlag));
wunit.commit();
@@ -73,7 +73,7 @@ namespace {
}
void setMinValid(OperationContext* ctx, OpTime ts) {
- Lock::DBLock lk(ctx->lockState(), "local", newlm::MODE_X);
+ Lock::DBWrite lk(ctx->lockState(), "local");
WriteUnitOfWork wunit(ctx);
Helpers::putSingleton(ctx, minvalidNS, BSON("$set" << BSON("ts" << ts)));
wunit.commit();
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index bcc372ca86f..e13ede3039c 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -119,7 +119,7 @@ namespace repl {
todo : make _logOpRS() call this so we don't repeat ourself?
*/
OpTime _logOpObjRS(OperationContext* txn, const BSONObj& op) {
- Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), "local");
// XXX soon this needs to be part of an outer WUOW not its own.
// We can't do this yet due to locking limitations.
WriteUnitOfWork wunit(txn);
@@ -236,7 +236,7 @@ namespace repl {
BSONObj *o2,
bool *bb,
bool fromMigrate ) {
- Lock::DBLock lk1(txn->lockState(), "local", newlm::MODE_X);
+ Lock::DBWrite lk1(txn->lockState(), "local");
WriteUnitOfWork wunit(txn);
if ( strncmp(ns, "local.", 6) == 0 ) {
@@ -320,7 +320,7 @@ namespace repl {
BSONObj *o2,
bool *bb,
bool fromMigrate ) {
- Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), "local");
WriteUnitOfWork wunit(txn);
static BufBuilder bufbuilder(8*1024); // todo there is likely a mutex on this constructor
diff --git a/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp b/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
index 9ec360369ab..69edf78b57f 100644
--- a/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/repl_coordinator_external_state_impl.cpp
@@ -56,9 +56,7 @@ namespace repl {
namespace {
// TODO: Change this to local.system.replset when we remove disable the hybrid coordinator.
const char configCollectionName[] = "local.new.replset";
- const char configDatabaseName[] = "local";
const char meCollectionName[] = "local.me";
- const char meDatabaseName[] = "local";
const char tsFieldName[] = "ts";
} // namespace
@@ -90,7 +88,7 @@ namespace {
std::string myname = getHostName();
OID myRID;
{
- Lock::DBLock lock(txn->lockState(), meDatabaseName, newlm::MODE_X);
+ Lock::DBWrite lock(txn->lockState(), meCollectionName);
BSONObj me;
// local.me is an identifier for a server for getLastError w:2+
@@ -137,7 +135,7 @@ namespace {
OperationContext* txn,
const BSONObj& config) {
try {
- Lock::DBLock dbWriteLock(txn->lockState(), configDatabaseName, newlm::MODE_X);
+ Lock::DBWrite dbWriteLock(txn->lockState(), configCollectionName);
Helpers::putSingleton(txn, configCollectionName, config);
return Status::OK();
}
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 75b0bc6ecf9..74273a40ba2 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -84,7 +84,7 @@ namespace {
options.syncIndexes = ! dataPass;
// Make database stable
- Lock::DBLock dbWrite(txn->lockState(), db, newlm::MODE_X);
+ Lock::DBWrite dbWrite(txn->lockState(), db);
if (!cloner.go(txn, db, host, options, NULL, err, &errCode)) {
log() << "initial sync: error while "
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 9cd57ae030c..20c99e289d9 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -123,7 +123,7 @@ namespace repl {
lk.reset(new Lock::GlobalWrite(txn->lockState()));
} else {
// DB level lock for this operation
- lk.reset(new Lock::DBLock(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X));
+ lk.reset(new Lock::DBWrite(txn->lockState(), ns));
}
Client::Context ctx(txn, ns);
@@ -328,7 +328,7 @@ namespace {
BackgroundSync* bgsync = BackgroundSync::get();
if (bgsync->getInitialSyncRequestedFlag()) {
// got a resync command
- Lock::DBLock lk(txn.lockState(), "local", newlm::MODE_X);
+ Lock::DBWrite lk(txn.lockState(), "local");
WriteUnitOfWork wunit(&txn);
Client::Context ctx(&txn, "local");
@@ -481,7 +481,7 @@ namespace {
OpTime lastOpTime;
{
OperationContextImpl txn; // XXX?
- Lock::DBLock lk(txn.lockState(), "local", newlm::MODE_X);
+ Lock::DBWrite lk(txn.lockState(), "local");
WriteUnitOfWork wunit(&txn);
while (!ops->empty()) {
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index fff985c5666..71cb99539d0 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -43,8 +43,7 @@ namespace CountTests {
class Base {
public:
- Base() : lk(_txn.lockState(), nsToDatabaseSubstring(ns()), newlm::MODE_X),
- _wunit(&_txn), _context(&_txn, ns()) {
+ Base() : lk(_txn.lockState(), ns()), _wunit(&_txn), _context(&_txn, ns()) {
_database = _context.db();
_collection = _database->getCollection( &_txn, ns() );
if ( _collection ) {
@@ -97,7 +96,7 @@ namespace CountTests {
OperationContextImpl _txn;
private:
- Lock::DBLock lk;
+ Lock::DBWrite lk;
WriteUnitOfWork _wunit;
Client::Context _context;
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 70ab52c878c..4801b711a1c 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -62,7 +62,7 @@ namespace mongo {
{
// Remove _id range [_min, _max).
- Lock::DBLock lk(txn.lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
+ Lock::DBWrite lk(txn.lockState(), ns);
WriteUnitOfWork wunit(&txn);
Client::Context ctx(&txn, ns );
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index aa6d7828629..72a27859efa 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -524,7 +524,7 @@ namespace NamespaceTests {
OperationContextImpl txn;
- Lock::DBLock lk(txn.lockState(), dbName, newlm::MODE_X);
+ Lock::DBWrite lk(txn.lockState(), dbName);
bool justCreated;
Database* db = dbHolder().getOrCreate(&txn, dbName, justCreated);
@@ -567,7 +567,7 @@ namespace NamespaceTests {
OperationContextImpl txn;
- Lock::DBLock lk(txn.lockState(), dbName, newlm::MODE_X);
+ Lock::DBWrite lk(txn.lockState(), dbName);
bool justCreated;
Database* db = dbHolder().getOrCreate(&txn, dbName, justCreated);
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 598dad993d1..aaeaf6c6817 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -239,7 +239,7 @@ namespace QueryTests {
{
// Check internal server handoff to getmore.
- Lock::DBLock lk(_txn.lockState(), "unittests", newlm::MODE_X);
+ Lock::DBWrite lk(_txn.lockState(), ns);
WriteUnitOfWork wunit(&_txn);
Client::Context ctx(&_txn, ns );
ClientCursorPin clientCursor( ctx.db()->getCollection(&_txn, ns), cursorId );
@@ -585,7 +585,7 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.OplogReplaySlaveReadTill";
- Lock::DBLock lk(_txn.lockState(), "unittests", newlm::MODE_X);
+ Lock::DBWrite lk(_txn.lockState(), ns);
WriteUnitOfWork wunit(&_txn);
Client::Context ctx(&_txn, ns );
@@ -1401,7 +1401,7 @@ namespace QueryTests {
public:
CollectionInternalBase( const char *nsLeaf ) :
CollectionBase( nsLeaf ),
- _lk(_txn.lockState(), "unittests", newlm::MODE_X),
+ _lk(_txn.lockState(), ns() ),
_wunit( &_txn ),
_ctx(&_txn, ns()) {
}
@@ -1410,7 +1410,7 @@ namespace QueryTests {
}
private:
- Lock::DBLock _lk;
+ Lock::DBWrite _lk;
WriteUnitOfWork _wunit;
Client::Context _ctx;
};
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 420c38f3d7f..797d1d72f6d 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -199,7 +199,7 @@ namespace ThreadedTests {
Lock::DBRead x(&lockState, "local");
}
{
- Lock::DBLock x(&lockState, "local", newlm::MODE_X);
+ Lock::DBWrite x(&lockState, "local");
// No actual writing here, so no WriteUnitOfWork
if( sometimes ) {
Lock::TempRelease t(&lockState);
@@ -211,11 +211,11 @@ namespace ThreadedTests {
}
{
- Lock::DBLock x(&lockState, "admin", newlm::MODE_X);
+ Lock::DBWrite x(&lockState, "admin");
}
}
else if( q == 3 ) {
- Lock::DBLock x(&lockState, "foo", newlm::MODE_X);
+ Lock::DBWrite x(&lockState, "foo");
Lock::DBRead y(&lockState, "admin");
}
else if( q == 4 ) {
@@ -223,7 +223,7 @@ namespace ThreadedTests {
Lock::DBRead y(&lockState, "admin");
}
else {
- Lock::DBLock w(&lockState, "foo", newlm::MODE_X);
+ Lock::DBWrite w(&lockState, "foo");
{
Lock::TempRelease t(&lockState);
diff --git a/src/mongo/s/d_merge.cpp b/src/mongo/s/d_merge.cpp
index 9d1e728ac4f..a314f2161c7 100644
--- a/src/mongo/s/d_merge.cpp
+++ b/src/mongo/s/d_merge.cpp
@@ -293,7 +293,7 @@ namespace mongo {
//
{
- Lock::DBLock writeLk(txn->lockState(), nss.db(), newlm::MODE_X);
+ Lock::DBWrite writeLk(txn->lockState(), nss.ns());
shardingState.mergeChunks(txn, nss.ns(), minKey, maxKey, mergeVersion);
}
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 87b56cd89ca..7a22feb0a2a 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1244,7 +1244,7 @@ namespace mongo {
myVersion.incMajor();
{
- Lock::DBLock lk(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns );
verify( myVersion > shardingState.getVersion( ns ) );
// bump the metadata's version up and "forget" about the chunk being moved
@@ -1658,7 +1658,7 @@ namespace mongo {
if ( getState() != DONE ) {
// Unprotect the range if needed/possible on unsuccessful TO migration
- Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns);
string errMsg;
if (!shardingState.forgetPending(txn, ns, min, max, epoch, &errMsg)) {
warning() << errMsg << endl;
@@ -1716,7 +1716,7 @@ namespace mongo {
indexSpecs.insert(indexSpecs.begin(), indexes.begin(), indexes.end());
}
- Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns);
Client::Context ctx(txn, ns);
Database* db = ctx.db();
Collection* collection = db->getCollection( txn, ns );
@@ -1801,7 +1801,7 @@ namespace mongo {
{
// Protect the range by noting that we're now starting a migration to it
- Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns);
if (!shardingState.notePending(txn, ns, min, max, epoch, &errmsg)) {
warning() << errmsg << endl;
setState(FAIL);
@@ -2101,7 +2101,7 @@ namespace mongo {
}
}
- Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
+ Lock::DBWrite lk(txn->lockState(), ns);
Client::Context ctx(txn, ns);
if (serverGlobalParams.moveParanoia) {
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index e718bf9db19..c5f6fe331a3 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -787,14 +787,14 @@ namespace mongo {
//
{
- Lock::DBLock writeLk(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite writeLk(txn->lockState(), ns);
- // NOTE: The newShardVersion resulting from this split is higher than any
- // other chunk version, so it's also implicitly the newCollVersion
+ // NOTE: The newShardVersion resulting from this split is higher than any other
+ // chunk version, so it's also implicitly the newCollVersion
ChunkVersion newShardVersion = collVersion;
- // Increment the minor version once, shardingState.splitChunk increments once
- // per split point (resulting in the correct final shard/collection version)
+ // Increment the minor version once, shardingState.splitChunk increments once per
+ // split point (resulting in the correct final shard/collection version)
// TODO: Revisit this interface, it's a bit clunky
newShardVersion.incMinor();
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index 71340b94288..24928c69400 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -595,7 +595,7 @@ namespace mongo {
{
// DBLock needed since we're now potentially changing the metadata, and don't want
// reads/writes to be ongoing.
- Lock::DBLock writeLk(txn->lockState(), nsToDatabaseSubstring(ns), newlm::MODE_X);
+ Lock::DBWrite writeLk(txn->lockState(), ns );
//
// Get the metadata now that the load has completed
@@ -1298,7 +1298,7 @@ namespace mongo {
}
bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
- Lock::DBLock dbXLock(txn->lockState(), dbname, newlm::MODE_X);
+ Lock::DBWrite dbXLock(txn->lockState(), dbname);
Client::Context ctx(txn, dbname);
shardingState.appendInfo( result );
diff --git a/src/mongo/s/metadata_loader.h b/src/mongo/s/metadata_loader.h
index a058a1e5cfa..744a0d0c25a 100644
--- a/src/mongo/s/metadata_loader.h
+++ b/src/mongo/s/metadata_loader.h
@@ -48,12 +48,12 @@ namespace mongo {
* CollectionMetadata has both persisted and volatile state (for now) - the persisted
* config server chunk state and the volatile pending state which is only tracked locally
* while a server is the primary. This requires a two-step loading process - the persisted
- * chunk state *cannot* be loaded in a DBLock lock while the pending chunk state *must* be.
+ * chunk state *cannot* be loaded in a DBWrite lock while the pending chunk state *must* be.
*
* Example usage:
* beforeMetadata = <get latest local metadata>;
* remoteMetadata = makeCollectionMetadata( beforeMetadata, remoteMetadata );
- * DBLock lock(txn, dbname, newlm::MODE_X);
+ * DBWrite lock( ns );
* afterMetadata = <get latest local metadata>;
* promotePendingChunks( afterMetadata, remoteMetadata );
*