summaryrefslogtreecommitdiff
path: root/src/mongo/db/cloner.cpp
diff options
context:
space:
mode:
authorAndy Schwerin <schwerin@mongodb.com>2015-12-11 13:45:08 -0500
committerAndy Schwerin <schwerin@mongodb.com>2015-12-21 18:10:25 -0500
commit329132d8098db7922902b8a46ea4f46a94408188 (patch)
treeb0d131c3eb8bacdd8994e4cb19a8aec430e839c2 /src/mongo/db/cloner.cpp
parent7e7ea93facc494fbc393f7170d50b371fbf5f9f0 (diff)
downloadmongo-329132d8098db7922902b8a46ea4f46a94408188.tar.gz
SERVER-21958 Remove mayYield and mayBeInterrupted flags from cloner, as they are always set to true.
Diffstat (limited to 'src/mongo/db/cloner.cpp')
-rw-r--r--src/mongo/db/cloner.cpp128
1 file changed, 36 insertions, 92 deletions
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index b3b128b89ab..eec4a83e1db 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -141,9 +141,7 @@ struct Cloner::Fun {
<< "]",
!createdCollection);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- if (_mayBeInterrupted) {
- txn->checkForInterrupt();
- }
+ txn->checkForInterrupt();
WriteUnitOfWork wunit(txn);
Status s = userCreateNS(txn, db, to_collection.toString(), from_options, false);
@@ -163,41 +161,36 @@ struct Cloner::Fun {
log() << "clone " << to_collection << ' ' << numSeen << endl;
lastLog = now;
}
+ txn->checkForInterrupt();
- if (_mayBeInterrupted) {
- txn->checkForInterrupt();
- }
+ scopedXact.reset();
+ globalWriteLock.reset();
- if (_mayYield) {
- scopedXact.reset();
- globalWriteLock.reset();
+ CurOp::get(txn)->yielded();
- CurOp::get(txn)->yielded();
+ scopedXact.reset(new ScopedTransaction(txn, MODE_X));
+ globalWriteLock.reset(new Lock::GlobalWrite(txn->lockState()));
- scopedXact.reset(new ScopedTransaction(txn, MODE_X));
- globalWriteLock.reset(new Lock::GlobalWrite(txn->lockState()));
+ // Check if everything is still all right.
+ if (txn->writesAreReplicated()) {
+ uassert(
+ 28592,
+ str::stream() << "Cannot write to ns: " << to_collection.ns()
+ << " after yielding",
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(to_collection));
+ }
- // Check if everything is still all right.
- if (txn->writesAreReplicated()) {
- uassert(28592,
- str::stream() << "Cannot write to ns: " << to_collection.ns()
- << " after yielding",
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(
- to_collection));
- }
+ // TODO: SERVER-16598 abort if original db or collection is gone.
+ db = dbHolder().get(txn, _dbName);
+ uassert(28593,
+ str::stream() << "Database " << _dbName << " dropped while cloning",
+ db != NULL);
- // TODO: SERVER-16598 abort if original db or collection is gone.
- db = dbHolder().get(txn, _dbName);
- uassert(28593,
- str::stream() << "Database " << _dbName << " dropped while cloning",
- db != NULL);
-
- collection = db->getCollection(to_collection);
- uassert(28594,
- str::stream() << "Collection " << to_collection.ns()
- << " dropped while cloning",
- collection != NULL);
- }
+ collection = db->getCollection(to_collection);
+ uassert(28594,
+ str::stream() << "Collection " << to_collection.ns()
+ << " dropped while cloning",
+ collection != NULL);
}
BSONObj tmp = i.nextSafe();
@@ -218,9 +211,7 @@ struct Cloner::Fun {
verify(collection);
++numSeen;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- if (_mayBeInterrupted) {
- txn->checkForInterrupt();
- }
+ txn->checkForInterrupt();
WriteUnitOfWork wunit(txn);
@@ -250,8 +241,6 @@ struct Cloner::Fun {
BSONObj from_options;
NamespaceString to_collection;
time_t saveLast;
- bool _mayYield;
- bool _mayBeInterrupted;
};
/* copy the specified collection
@@ -263,8 +252,6 @@ void Cloner::copy(OperationContext* txn,
const NamespaceString& to_collection,
bool masterSameProcess,
bool slaveOk,
- bool mayYield,
- bool mayBeInterrupted,
Query query) {
LOG(2) << "\t\tcloning collection " << from_collection << " to " << to_collection << " on "
<< _conn->getServerAddress() << " with filter " << query.toString() << endl;
@@ -275,8 +262,6 @@ void Cloner::copy(OperationContext* txn,
f.from_options = from_opts;
f.to_collection = to_collection;
f.saveLast = time(0);
- f._mayYield = mayYield;
- f._mayBeInterrupted = mayBeInterrupted;
int options = QueryOption_NoCursorTimeout | (slaveOk ? QueryOption_SlaveOk : 0);
{
@@ -301,9 +286,7 @@ void Cloner::copyIndexes(OperationContext* txn,
const BSONObj& from_opts,
const NamespaceString& to_collection,
bool masterSameProcess,
- bool slaveOk,
- bool mayYield,
- bool mayBeInterrupted) {
+ bool slaveOk) {
LOG(2) << "\t\t copyIndexes " << from_collection << " to " << to_collection << " on "
<< _conn->getServerAddress();
@@ -336,9 +319,7 @@ void Cloner::copyIndexes(OperationContext* txn,
Collection* collection = db->getCollection(to_collection);
if (!collection) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- if (mayBeInterrupted) {
- txn->checkForInterrupt();
- }
+ txn->checkForInterrupt();
WriteUnitOfWork wunit(txn);
Status s = userCreateNS(txn, db, to_collection.toString(), from_opts, false);
@@ -356,8 +337,7 @@ void Cloner::copyIndexes(OperationContext* txn,
// matches. It also wouldn't work on non-empty collections so we would need both
// implementations anyway as long as that is supported.
MultiIndexBlock indexer(txn, collection);
- if (mayBeInterrupted)
- indexer.allowInterruption();
+ indexer.allowInterruption();
indexer.removeExistingIndexes(&indexesToBuild);
if (indexesToBuild.empty())
@@ -384,8 +364,6 @@ bool Cloner::copyCollection(OperationContext* txn,
const string& ns,
const BSONObj& query,
string& errmsg,
- bool mayYield,
- bool mayBeInterrupted,
bool shouldCopyIndexes) {
const NamespaceString nss(ns);
const string dbname = nss.db().toString();
@@ -411,8 +389,7 @@ bool Cloner::copyCollection(OperationContext* txn,
options = col["options"].Obj();
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- if (mayBeInterrupted)
- txn->checkForInterrupt();
+ txn->checkForInterrupt();
WriteUnitOfWork wunit(txn);
Status status = userCreateNS(txn, db, ns, options, false);
@@ -430,16 +407,7 @@ bool Cloner::copyCollection(OperationContext* txn,
}
// main data
- copy(txn,
- dbname,
- nss,
- options,
- nss,
- false,
- true,
- mayYield,
- mayBeInterrupted,
- Query(query).snapshot());
+ copy(txn, dbname, nss, options, nss, false, true, Query(query).snapshot());
/* TODO : copyIndexes bool does not seem to be implemented! */
if (!shouldCopyIndexes) {
@@ -447,15 +415,7 @@ bool Cloner::copyCollection(OperationContext* txn,
}
// indexes
- copyIndexes(txn,
- dbname,
- NamespaceString(ns),
- options,
- NamespaceString(ns),
- false,
- true,
- mayYield,
- mayBeInterrupted);
+ copyIndexes(txn, dbname, NamespaceString(ns), options, NamespaceString(ns), false, true);
return true;
}
@@ -600,10 +560,7 @@ Status Cloner::copyDb(OperationContext* txn,
{
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- if (opts.mayBeInterrupted) {
- txn->checkForInterrupt();
- }
-
+ txn->checkForInterrupt();
WriteUnitOfWork wunit(txn);
// we defer building id index for performance - building it in batch is much
@@ -623,16 +580,7 @@ Status Cloner::copyDb(OperationContext* txn,
if (opts.snapshot)
q.snapshot();
- copy(txn,
- toDBName,
- from_name,
- options,
- to_name,
- masterSameProcess,
- opts.slaveOk,
- opts.mayYield,
- opts.mayBeInterrupted,
- q);
+ copy(txn, toDBName, from_name, options, to_name, masterSameProcess, opts.slaveOk, q);
// Copy releases the lock, so we need to re-load the database. This should
// probably throw if the database has changed in between, but for now preserve
@@ -648,9 +596,7 @@ Status Cloner::copyDb(OperationContext* txn,
set<RecordId> dups;
MultiIndexBlock indexer(txn, c);
- if (opts.mayBeInterrupted) {
- indexer.allowInterruption();
- }
+ indexer.allowInterruption();
uassertStatusOK(indexer.init(c->getIndexCatalog()->getDefaultIdIndexSpec()));
uassertStatusOK(indexer.insertAllDocumentsInCollection(&dups));
@@ -697,9 +643,7 @@ Status Cloner::copyDb(OperationContext* txn,
collection.getObjectField("options"),
to_name,
masterSameProcess,
- opts.slaveOk,
- opts.mayYield,
- opts.mayBeInterrupted);
+ opts.slaveOk);
}
}