author     Hari Khalsa <hkhalsa@10gen.com>    2014-05-12 17:05:11 -0400
committer  Hari Khalsa <hkhalsa@10gen.com>    2014-05-13 16:07:58 -0400
commit     b3e8e45ea6f346f804161e1fe4043ba3e5850ba8 (patch)
tree       4179ad2d0fbb673fc5d72b7a5e84c97c34fe053f
parent     a6a0c243b6cd6a5d45c876ab100a21073c070a00 (diff)
download   mongo-b3e8e45ea6f346f804161e1fe4043ba3e5850ba8.tar.gz
SERVER-13922 remove query yielding and some dbtemprelease
-rw-r--r--  jstests/core/group7.js | 4
-rw-r--r--  jstests/noPassthrough/indexbg1.js | 3
-rw-r--r--  jstests/noPassthrough/indexbg2.js | 3
-rw-r--r--  jstests/noPassthrough/update_yield1.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_drop.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js | 4
-rw-r--r--  src/mongo/db/catalog/index_create.cpp | 30
-rw-r--r--  src/mongo/db/clientcursor.cpp | 76
-rw-r--r--  src/mongo/db/clientcursor.h | 7
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 1
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp | 1
-rw-r--r--  src/mongo/db/commands/geonear.cpp | 1
-rw-r--r--  src/mongo/db/commands/group.cpp | 1
-rw-r--r--  src/mongo/db/commands/mr.cpp | 2
-rw-r--r--  src/mongo/db/commands/parallel_collection_scan.cpp | 3
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 1
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp | 8
-rw-r--r--  src/mongo/db/dbcommands.cpp | 29
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 2
-rw-r--r--  src/mongo/db/ops/count.cpp | 1
-rw-r--r--  src/mongo/db/ops/delete_executor.cpp | 12
-rw-r--r--  src/mongo/db/ops/update.cpp | 100
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp | 1
-rw-r--r--  src/mongo/db/query/cached_plan_runner.cpp | 7
-rw-r--r--  src/mongo/db/query/cached_plan_runner.h | 2
-rw-r--r--  src/mongo/db/query/eof_runner.cpp | 3
-rw-r--r--  src/mongo/db/query/eof_runner.h | 2
-rw-r--r--  src/mongo/db/query/idhack_runner.cpp | 5
-rw-r--r--  src/mongo/db/query/idhack_runner.h | 5
-rw-r--r--  src/mongo/db/query/internal_runner.cpp | 26
-rw-r--r--  src/mongo/db/query/internal_runner.h | 3
-rw-r--r--  src/mongo/db/query/multi_plan_runner.cpp | 32
-rw-r--r--  src/mongo/db/query/multi_plan_runner.h | 10
-rw-r--r--  src/mongo/db/query/new_find.cpp | 2
-rw-r--r--  src/mongo/db/query/plan_executor.cpp | 17
-rw-r--r--  src/mongo/db/query/plan_executor.h | 5
-rw-r--r--  src/mongo/db/query/runner.h | 47
-rw-r--r--  src/mongo/db/query/runner_yield_policy.h | 118
-rw-r--r--  src/mongo/db/query/single_solution_runner.cpp | 4
-rw-r--r--  src/mongo/db/query/single_solution_runner.h | 2
-rw-r--r--  src/mongo/db/query/subplan_runner.cpp | 19
-rw-r--r--  src/mongo/db/query/subplan_runner.h | 4
-rw-r--r--  src/mongo/dbtests/counttests.cpp | 40
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp | 23
-rw-r--r--  src/mongo/dbtests/query_single_solution_runner.cpp | 7
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp | 1
-rw-r--r--  src/mongo/dbtests/runner_registry.cpp | 1
-rw-r--r--  src/mongo/s/d_migrate.cpp | 6
-rw-r--r--  src/mongo/s/d_split.cpp | 4
49 files changed, 41 insertions, 652 deletions
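
Most of the per-file command changes below follow one pattern: callers that previously registered a Runner and then opted into automatic yielding now keep only the registration. A condensed before/after sketch of that caller pattern, assembled from the hunks in this diff (the surrounding command setup is elided, so treat it as illustrative rather than a compilable excerpt):

    // Before SERVER-13922: register the runner, then request automatic yielding.
    auto_ptr<Runner> runner(rawRunner);
    const ScopedRunnerRegistration safety(runner.get());
    runner->setYieldPolicy(Runner::YIELD_AUTO);   // the line the hunks below delete

    // After SERVER-13922: registration alone; there is no yield policy left to set.
    // The drive loop is unchanged in either case:
    BSONObj obj;
    Runner::RunnerState state;
    while (Runner::RUNNER_ADVANCED == (state = runner->getNext(&obj, NULL))) {
        // ... use obj ...
    }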
diff --git a/jstests/core/group7.js b/jstests/core/group7.js
index 1413000079c..76102cf343d 100644
--- a/jstests/core/group7.js
+++ b/jstests/core/group7.js
@@ -1,4 +1,7 @@
// Test yielding group command SERVER-1395
+// Currently disabled, see SERVER-13922.
+
+if (0) {
t = db.jstests_group7;
t.drop();
@@ -45,3 +48,4 @@ for( var j = 1; j <= 6; ++j ) {
updates *= 2;
}
assert( yielded );
+}
diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js
index d2c3ccac7e4..b335480d0ff 100644
--- a/jstests/noPassthrough/indexbg1.js
+++ b/jstests/noPassthrough/indexbg1.js
@@ -1,3 +1,5 @@
+// SERVER-13922
+if (0) {
// Test background index creation
load( "jstests/libs/slow_weekly_util.js" )
@@ -120,3 +122,4 @@ printjson( db.getLastError() );
assert( !db.getLastError() );
testServer.stop();
+} // if(0)
diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js
index 0b5edc79aa2..e89b91fa2cf 100644
--- a/jstests/noPassthrough/indexbg2.js
+++ b/jstests/noPassthrough/indexbg2.js
@@ -1,3 +1,5 @@
+// SERVER-13922
+if (0) {
// Test background index creation w/ constraints
load( "jstests/libs/slow_weekly_util.js" )
@@ -88,3 +90,4 @@ doTest( "false" );
doTest( "true" );
testServer.stop();
+} // if (0)
diff --git a/jstests/noPassthrough/update_yield1.js b/jstests/noPassthrough/update_yield1.js
index db684a6d6eb..622ce84cc50 100644
--- a/jstests/noPassthrough/update_yield1.js
+++ b/jstests/noPassthrough/update_yield1.js
@@ -1,3 +1,5 @@
+// SERVER-13922
+if (0) {
load( "jstests/libs/slow_weekly_util.js" );
var testServer = new SlowWeeklyMongod( "update_yield1" );
@@ -71,3 +73,5 @@ x = db.currentOp();
assert.eq( 0 , x.inprog.length , "weird 2" );
testServer.stop();
+
+} // if (0)
diff --git a/jstests/noPassthroughWithMongod/indexbg_drop.js b/jstests/noPassthroughWithMongod/indexbg_drop.js
index 9e754b747ef..73446736d84 100644
--- a/jstests/noPassthroughWithMongod/indexbg_drop.js
+++ b/jstests/noPassthroughWithMongod/indexbg_drop.js
@@ -10,6 +10,9 @@
// Index drop race
+// SERVER-13922 background indices are foregrounded with yielding gone
+if (0) {
+
var dbname = 'dropbgindex';
var collection = 'jstests_feh';
var size = 500000;
@@ -100,3 +103,4 @@ assert.soon( function() {
);
replTest.stopSet();
+} // if (0)
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
index 38cced11bb9..7aac1265c6d 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
@@ -1,3 +1,6 @@
+// SERVER-13922
+if (0) {
+
// TODO: SERVER-13215 move test back to replSets suite.
/**
@@ -110,3 +113,4 @@
replTest.stopSet();
}());
+} // if(0)
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 631dc12ede1..a33fe7ff9d8 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -41,7 +41,6 @@
#include "mongo/db/kill_current_op.h"
#include "mongo/db/pdfile_private.h"
#include "mongo/db/query/internal_plans.h"
-#include "mongo/db/query/runner_yield_policy.h"
#include "mongo/db/repl/is_master.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/rs.h"
@@ -81,7 +80,7 @@ namespace mongo {
Collection* collection,
const IndexDescriptor* descriptor,
IndexAccessMethod* accessMethod,
- bool shouldYield) {
+ bool canBeKilled ) {
string ns = collection->ns().ns(); // our copy for sanity
@@ -92,7 +91,7 @@ namespace mongo {
{
stringstream ss;
ss << "Index Build";
- if ( shouldYield )
+ if ( canBeKilled )
ss << "(background)";
curopMessage = ss.str();
}
@@ -107,10 +106,6 @@ namespace mongo {
auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns,collection));
- // We're not delegating yielding to the runner because we need to know when a yield
- // happens.
- RunnerYieldPolicy yieldPolicy;
-
std::string idxName = descriptor->indexName();
// After this yields in the loop, idx may point at a different index (if indexes get
@@ -131,7 +126,7 @@ namespace mongo {
}
catch( AssertionException& e ) {
if (ErrorCodes::isInterruption(DBException::convertExceptionCode(e.getCode()))) {
- killCurrentOp.checkForInterrupt();
+ txn->checkForInterrupt();
}
// TODO: Does exception really imply dropDups exception?
@@ -168,26 +163,17 @@ namespace mongo {
progress.hit();
getDur().commitIfNeeded();
- if (shouldYield && yieldPolicy.shouldYield()) {
- // Note: yieldAndCheckIfOK checks for interrupt and thus can throw
- if (!yieldPolicy.yieldAndCheckIfOK(runner.get())) {
- uasserted(ErrorCodes::CursorNotFound, "cursor gone during bg index");
- break;
- }
+ if (canBeKilled) {
// Checking for interrupt here is necessary because the bg index
// interruptors can only interrupt this index build while they hold
// a write lock, and yieldAndCheckIfOK only checks for
// interrupt prior to yielding our write lock. We need to check the kill flag
// here before another iteration of the loop.
- killCurrentOp.checkForInterrupt();
-
- progress.setTotalWhileRunning( collection->numRecords() );
- // Recalculate idxNo if we yielded
- IndexDescriptor* idx = collection->getIndexCatalog()->findIndexByName( idxName,
- true );
- verify( idx && idx == descriptor );
+ txn->checkForInterrupt();
}
+
+ progress.setTotalWhileRunning( collection->numRecords() );
}
progress.finished();
@@ -295,7 +281,7 @@ namespace mongo {
getDur().commitIfNeeded();
RARELY if ( mayInterrupt ) {
- killCurrentOp.checkForInterrupt();
+ txn->checkForInterrupt();
}
}
}
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 189f89e611a..396fd522b49 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -148,66 +148,6 @@ namespace mongo {
_collection = NULL;
}
- void yieldOrSleepFor1Microsecond() {
-#ifdef _WIN32
- SwitchToThread();
-#elif defined(__linux__)
- pthread_yield();
-#else
- sleepmicros(1);
-#endif
- }
-
- void ClientCursor::staticYield(int micros, const StringData& ns) {
- bool haveReadLock = Lock::isReadLocked();
-
- killCurrentOp.checkForInterrupt();
- {
- dbtempreleasecond unlock;
- if ( unlock.unlocked() ) {
- if ( haveReadLock ) {
- // This sleep helps reader threads yield to writer threads.
- // Without this, the underlying reader/writer lock implementations
- // are not sufficiently writer-greedy.
-#ifdef _WIN32
- SwitchToThread();
-#else
- if ( micros == 0 ) {
- yieldOrSleepFor1Microsecond();
- }
- else {
- sleepmicros(1);
- }
-#endif
- }
- else {
- if ( micros == -1 ) {
- sleepmicros(Client::recommendedYieldMicros());
- }
- else if ( micros == 0 ) {
- yieldOrSleepFor1Microsecond();
- }
- else if ( micros > 0 ) {
- sleepmicros( micros );
- }
- }
-
- }
- else if ( Listener::getTimeTracker() == 0 ) {
- // we aren't running a server, so likely a repair, so don't complain
- }
- else {
- CurOp * c = cc().curop();
- while ( c->parent() )
- c = c->parent();
- warning() << "ClientCursor::staticYield can't unlock b/c of recursive lock"
- << " ns: " << ns
- << " top: " << c->info()
- << endl;
- }
- }
- }
-
//
// Timing and timeouts
//
@@ -227,22 +167,6 @@ namespace mongo {
mongo::updateSlaveLocation( curop , _ns.c_str() , _slaveReadTill );
}
- int ClientCursor::suggestYieldMicros() {
- int writers = 0;
- int readers = 0;
-
- int micros = Client::recommendedYieldMicros( &writers , &readers );
-
- if ( micros > 0 && writers == 0 && Lock::isR() ) {
- // we have a read lock, and only reads are coming on, so why bother unlocking
- return 0;
- }
-
- wassert( micros < 10000000 );
- dassert( micros < 1000001 );
- return micros;
- }
-
//
// Pin methods
// TODO: Simplify when we kill Cursor. In particular, once we've pinned a CC, it won't be
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index b184f524af7..f78cc34c9df 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -82,13 +82,6 @@ namespace mongo {
void kill();
//
- // Yielding.
- //
-
- static void staticYield(int micros, const StringData& ns);
- static int suggestYieldMicros();
-
- //
// Timing and timeouts
//
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 236867d4a1f..3bf8152fee3 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -112,7 +112,6 @@ namespace mongo {
auto_ptr<Runner> runner(rawRunner);
const ScopedRunnerRegistration safety(runner.get());
- runner->setYieldPolicy(Runner::YIELD_AUTO);
string cursorName;
BSONObj obj;
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 575de9f7484..34370d6907a 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -154,7 +154,6 @@ namespace mongo {
// Set up automatic yielding
const ScopedRunnerRegistration safety(runner.get());
- runner->setYieldPolicy(Runner::YIELD_AUTO);
Runner::RunnerState state;
if (Runner::RUNNER_ADVANCED == (state = runner->getNext(&doc, NULL))) {
diff --git a/src/mongo/db/commands/geonear.cpp b/src/mongo/db/commands/geonear.cpp
index 3a1203af335..d7c6f5a4cd5 100644
--- a/src/mongo/db/commands/geonear.cpp
+++ b/src/mongo/db/commands/geonear.cpp
@@ -195,7 +195,6 @@ namespace mongo {
auto_ptr<Runner> runner(rawRunner);
const ScopedRunnerRegistration safety(runner.get());
- runner->setYieldPolicy(Runner::YIELD_AUTO);
double totalDistance = 0;
BSONObjBuilder resultBuilder(result.subarrayStart("results"));
diff --git a/src/mongo/db/commands/group.cpp b/src/mongo/db/commands/group.cpp
index 38b01c5936e..c6fd5c0f7bb 100644
--- a/src/mongo/db/commands/group.cpp
+++ b/src/mongo/db/commands/group.cpp
@@ -153,7 +153,6 @@ namespace mongo {
auto_ptr<Runner> runner(rawRunner);
const ScopedRunnerRegistration safety(runner.get());
- runner->setYieldPolicy(Runner::YIELD_AUTO);
BSONObj obj;
Runner::RunnerState state;
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 8af97cf9d56..b1e80ecbf06 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -974,7 +974,6 @@ namespace mongo {
auto_ptr<Runner> runner(rawRunner);
const ScopedRunnerRegistration safety(runner.get());
- runner->setYieldPolicy(Runner::YIELD_AUTO);
// iterate over all sorted objects
BSONObj o;
@@ -1325,7 +1324,6 @@ namespace mongo {
auto_ptr<Runner> runner(rawRunner);
const ScopedRunnerRegistration safety(runner.get());
- runner->setYieldPolicy(Runner::YIELD_AUTO);
Timer mt;
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 587dcea13eb..34459ffaf18 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -85,9 +85,6 @@ namespace mongo {
_collection = NULL;
_iterators.clear();
}
- virtual void setYieldPolicy(YieldPolicy policy) {
- invariant( false );
- }
virtual void saveState() {
for (size_t i = 0; i < _iterators.size(); i++) {
_iterators[i]->prepareToYield();
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 3cf66336dfc..7d48d1b40db 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -117,7 +117,6 @@ namespace {
}
// These are all no-ops for PipelineRunners
- virtual void setYieldPolicy(YieldPolicy policy) {}
virtual void saveState() {}
virtual bool restoreState() { return true; }
virtual const Collection* collection() { return NULL; }
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 02cabcb1c3a..160ed358bab 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -808,14 +808,6 @@ namespace mongo {
if (elapsedTracker.intervalHasElapsed()) {
// Consider yielding between inserts.
- if (state.hasLock()) {
- int micros = ClientCursor::suggestYieldMicros();
- if (micros > 0) {
- state.unlock();
- killCurrentOp.checkForInterrupt();
- sleepmicros(micros);
- }
- }
killCurrentOp.checkForInterrupt();
elapsedTracker.resetLastTime();
}
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 37e855a15bc..97ef4247644 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -863,33 +863,8 @@ namespace mongo {
int len;
const char * data = owned["data"].binDataClean( len );
- // Save state, yield, run the MD5, and reacquire lock.
- runner->saveState();
-
- try {
- dbtempreleasecond yield;
-
- md5_append( &st , (const md5_byte_t*)(data) , len );
- n++;
- }
- catch (SendStaleConfigException&) {
- log() << "metadata changed during filemd5" << endl;
- break;
- }
-
- // Have the lock again. See if we were killed.
- if (!runner->restoreState()) {
- if (!partialOk) {
- uasserted(13281, "File deleted during filemd5 command");
- }
- }
-
- if (!shardingState.getVersion(ns).isWriteCompatibleWith(shardVersionAtStart)) {
- // return partial results. Mongos will get the error at the start of the next
- // call if it doesn't update first.
- log() << "Config changed during filemd5 - command will resume " << endl;
- break;
- }
+ md5_append( &st , (const md5_byte_t*)(data) , len );
+ n++;
}
if (partialOk)
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 6f47e7bff7e..38815864034 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -379,7 +379,6 @@ namespace mongo {
InternalPlanner::FORWARD,
InternalPlanner::IXSCAN_FETCH));
- runner->setYieldPolicy(Runner::YIELD_AUTO);
DiskLoc rloc;
BSONObj obj;
Runner::RunnerState state;
@@ -530,7 +529,6 @@ namespace mongo {
auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx, min, max, false));
// we can afford to yield here because any change to the base data that we might miss is
// already being queued and will be migrated in the 'transferMods' stage
- runner->setYieldPolicy(Runner::YIELD_AUTO);
DiskLoc loc;
Runner::RunnerState state;
diff --git a/src/mongo/db/ops/count.cpp b/src/mongo/db/ops/count.cpp
index e4ffb974625..f576c33829b 100644
--- a/src/mongo/db/ops/count.cpp
+++ b/src/mongo/db/ops/count.cpp
@@ -96,7 +96,6 @@ namespace mongo {
try {
const ScopedRunnerRegistration safety(runner.get());
- runner->setYieldPolicy(Runner::YIELD_AUTO);
long long count = 0;
Runner::RunnerState state;
diff --git a/src/mongo/db/ops/delete_executor.cpp b/src/mongo/db/ops/delete_executor.cpp
index f4c65b509fa..7c16d203756 100644
--- a/src/mongo/db/ops/delete_executor.cpp
+++ b/src/mongo/db/ops/delete_executor.cpp
@@ -114,11 +114,6 @@ namespace mongo {
long long nDeleted = 0;
- const bool canYield = !_request->isGod() && (
- _canonicalQuery.get() ?
- !QueryPlannerCommon::hasNode(_canonicalQuery->root(), MatchExpression::ATOMIC) :
- LiteParsedQuery::isQueryIsolated(_request->getQuery()));
-
Runner* rawRunner;
if (_canonicalQuery.get()) {
uassertStatusOK(getRunner(collection, _canonicalQuery.release(), &rawRunner));
@@ -133,12 +128,7 @@ namespace mongo {
}
auto_ptr<Runner> runner(rawRunner);
- auto_ptr<ScopedRunnerRegistration> safety;
-
- if (canYield) {
- safety.reset(new ScopedRunnerRegistration(runner.get()));
- runner->setYieldPolicy(Runner::YIELD_AUTO);
- }
+ ScopedRunnerRegistration safety(runner.get());
DiskLoc rloc;
Runner::RunnerState state;
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 0e8bec14944..6559c2203a0 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -47,7 +47,6 @@
#include "mongo/db/query/get_runner.h"
#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/db/query/query_planner_common.h"
-#include "mongo/db/query/runner_yield_policy.h"
#include "mongo/db/repl/is_master.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/storage/mmap_v1/dur_transaction.h"
@@ -386,70 +385,6 @@ namespace mongo {
return Status::OK();
}
- Status recoverFromYield(const UpdateRequest& request,
- UpdateDriver* driver,
- Collection* collection) {
-
- const NamespaceString& nsString(request.getNamespaceString());
- // We yielded and recovered OK, and our cursor is still good. Details about
- // our namespace may have changed while we were yielded, so we re-acquire
- // them here. If we can't do so, escape the update loop. Otherwise, refresh
- // the driver so that it knows about what is currently indexed.
-
- if (request.shouldCallLogOp() && !isMasterNs(nsString.ns().c_str())) {
- return Status(ErrorCodes::NotMaster, mongoutils::str::stream() <<
- "Demoted from primary while performing update on " << nsString.ns());
- }
-
- Database* db = dbHolder().get(nsString.db().toString(), storageGlobalParams.dbpath);
-
- Collection* oldCollection = collection;
- collection = db->getCollection(nsString.ns());
-
- // We should not get a new pointer to the same collection...
- if (oldCollection && (oldCollection != collection))
- return Status(ErrorCodes::IllegalOperation,
- str::stream() << "Collection changed during the Update: ok?"
- << " old: " << oldCollection->ok()
- << " new:" << collection->ok());
-
- if (!collection)
- return Status(ErrorCodes::IllegalOperation,
- "Update aborted due to invalid state transitions after yield -- "
- "collection pointer NULL.");
-
- if (!collection->ok())
- return Status(ErrorCodes::IllegalOperation,
- "Update aborted due to invalid state transitions after yield -- "
- "collection not ok().");
-
- IndexCatalog* idxCatalog = collection->getIndexCatalog();
- if (!idxCatalog)
- return Status(ErrorCodes::IllegalOperation,
- "Update aborted due to invalid state transitions after yield -- "
- "IndexCatalog pointer NULL.");
-
- if (!idxCatalog->ok())
- return Status(ErrorCodes::IllegalOperation,
- "Update aborted due to invalid state transitions after yield -- "
- "IndexCatalog not ok().");
-
- if (request.getLifecycle()) {
- UpdateLifecycle* lifecycle = request.getLifecycle();
- lifecycle->setCollection(collection);
-
- if (!lifecycle->canContinue()) {
- return Status(ErrorCodes::IllegalOperation,
- "Update aborted due to invalid state transitions after yield.",
- 17270);
- }
-
- driver->refreshIndexKeys(lifecycle->getIndexKeys());
- }
-
- return Status::OK();
- }
-
Status ensureIdAndFirst(mb::Document& doc) {
mb::Element idElem = mb::findFirstChildNamed(doc.root(), idFieldName);
@@ -503,7 +438,6 @@ namespace mongo {
std::auto_ptr<CanonicalQuery> cqHolder(cq);
const NamespaceString& nsString = request.getNamespaceString();
UpdateLifecycle* lifecycle = request.getLifecycle();
- const CurOp* curOp = cc().curop();
Collection* collection = db->getCollection(nsString.ns());
@@ -532,15 +466,6 @@ namespace mongo {
// Register Runner with ClientCursor
const ScopedRunnerRegistration safety(runner.get());
- // Use automatic yield policy
- runner->setYieldPolicy(Runner::YIELD_AUTO);
-
- // If the update was marked with '$isolated' (a.k.a '$atomic'), we are not allowed to
- // yield while evaluating the update loop below.
- const bool isolated =
- (cq && QueryPlannerCommon::hasNode(cq->root(), MatchExpression::ATOMIC)) ||
- LiteParsedQuery::isQueryIsolated(request.getQuery());
-
//
// We'll start assuming we have one or more documents for this update. (Otherwise,
// we'll fall-back to insert case (if upsert is true).)
@@ -581,41 +506,20 @@ namespace mongo {
// Used during iteration of docs
BSONObj oldObj;
- // Keep track if we have done a write in isolation mode, which will indicate we can't yield
- bool isolationModeWriteOccured = false;
-
// Get first doc, and location
Runner::RunnerState state = Runner::RUNNER_ADVANCED;
- // Keep track of yield count so we can see if one happens on the getNext() calls below
- int oldYieldCount = curOp->numYields();
-
uassert(ErrorCodes::NotMaster,
mongoutils::str::stream() << "Not primary while updating " << nsString.ns(),
!request.shouldCallLogOp() || isMasterNs(nsString.ns().c_str()));
while (true) {
- // See if we have a write in isolation mode
- isolationModeWriteOccured = isolated && (opDebug->nModified > 0);
-
- // Change to manual yielding (no yielding) if we have written in isolation mode
- if (isolationModeWriteOccured) {
- runner->setYieldPolicy(Runner::YIELD_MANUAL);
- }
-
- // keep track of the yield count before calling getNext (which might yield).
- oldYieldCount = curOp->numYields();
-
// Get next doc, and location
DiskLoc loc;
state = runner->getNext(&oldObj, &loc);
- const bool didYield = (oldYieldCount != curOp->numYields());
if (state != Runner::RUNNER_ADVANCED) {
if (state == Runner::RUNNER_EOF) {
- if (didYield)
- uassertStatusOK(recoverFromYield(request, driver, collection));
-
// We have reached the logical end of the loop, so do yielding recovery
break;
}
@@ -626,10 +530,6 @@ namespace mongo {
}
}
- // Refresh things after a yield.
- if (didYield)
- uassertStatusOK(recoverFromYield(request, driver, collection));
-
// We fill this with the new locs of moved doc so we don't double-update.
if (updatedLocs && updatedLocs->count(loc) > 0) {
continue;
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index a61e76e1ce3..5f575ab3d73 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -198,7 +198,6 @@ namespace {
// DocumentSourceCursor expects a yielding Runner that has had its state saved.
- runner->setYieldPolicy(Runner::YIELD_AUTO);
runner->saveState();
// Put the Runner into a DocumentSourceCursor and add it to the front of the pipeline.
diff --git a/src/mongo/db/query/cached_plan_runner.cpp b/src/mongo/db/query/cached_plan_runner.cpp
index 6a17ffb4aa7..f93a9332ce3 100644
--- a/src/mongo/db/query/cached_plan_runner.cpp
+++ b/src/mongo/db/query/cached_plan_runner.cpp
@@ -116,13 +116,6 @@ namespace mongo {
}
}
- void CachedPlanRunner::setYieldPolicy(Runner::YieldPolicy policy) {
- _exec->setYieldPolicy(policy);
- if (NULL != _backupPlan.get()) {
- _backupPlan->setYieldPolicy(policy);
- }
- }
-
const std::string& CachedPlanRunner::ns() {
return _canonicalQuery->getParsed().ns();
}
diff --git a/src/mongo/db/query/cached_plan_runner.h b/src/mongo/db/query/cached_plan_runner.h
index 49d7cf8572f..f994640dcf7 100644
--- a/src/mongo/db/query/cached_plan_runner.h
+++ b/src/mongo/db/query/cached_plan_runner.h
@@ -76,8 +76,6 @@ namespace mongo {
virtual void invalidate(const DiskLoc& dl, InvalidationType type);
- virtual void setYieldPolicy(Runner::YieldPolicy policy);
-
virtual const std::string& ns();
virtual void kill();
diff --git a/src/mongo/db/query/eof_runner.cpp b/src/mongo/db/query/eof_runner.cpp
index 2bbf9993896..3021a4a9004 100644
--- a/src/mongo/db/query/eof_runner.cpp
+++ b/src/mongo/db/query/eof_runner.cpp
@@ -57,9 +57,6 @@ namespace mongo {
return false;
}
- void EOFRunner::setYieldPolicy(Runner::YieldPolicy policy) {
- }
-
void EOFRunner::invalidate(const DiskLoc& dl, InvalidationType type) {
}
diff --git a/src/mongo/db/query/eof_runner.h b/src/mongo/db/query/eof_runner.h
index acb88e2ce57..cd9e02b8702 100644
--- a/src/mongo/db/query/eof_runner.h
+++ b/src/mongo/db/query/eof_runner.h
@@ -62,8 +62,6 @@ namespace mongo {
virtual bool restoreState();
- virtual void setYieldPolicy(Runner::YieldPolicy policy);
-
virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual const std::string& ns();
diff --git a/src/mongo/db/query/idhack_runner.cpp b/src/mongo/db/query/idhack_runner.cpp
index c47ab3e4a51..0405f2caf74 100644
--- a/src/mongo/db/query/idhack_runner.cpp
+++ b/src/mongo/db/query/idhack_runner.cpp
@@ -188,11 +188,6 @@ namespace mongo {
bool IDHackRunner::restoreState() { return true; }
- void IDHackRunner::setYieldPolicy(Runner::YieldPolicy policy) {
- if (_done || _killed) { return; }
- _policy = policy;
- }
-
// Nothing to do here, holding no state.
void IDHackRunner::invalidate(const DiskLoc& dl, InvalidationType type) {
if (_done || _killed) { return; }
diff --git a/src/mongo/db/query/idhack_runner.h b/src/mongo/db/query/idhack_runner.h
index fce573ee6e5..1b2b4de95f4 100644
--- a/src/mongo/db/query/idhack_runner.h
+++ b/src/mongo/db/query/idhack_runner.h
@@ -65,8 +65,6 @@ namespace mongo {
virtual bool restoreState();
- virtual void setYieldPolicy(Runner::YieldPolicy policy);
-
virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual const std::string& ns();
@@ -107,9 +105,6 @@ namespace mongo {
// this.
boost::scoped_ptr<CanonicalQuery> _query;
- // Are we allowed to release the lock?
- Runner::YieldPolicy _policy;
-
// Did someone call kill() on us?
bool _killed;
diff --git a/src/mongo/db/query/internal_runner.cpp b/src/mongo/db/query/internal_runner.cpp
index 1f1384d3c93..fa60b334df3 100644
--- a/src/mongo/db/query/internal_runner.cpp
+++ b/src/mongo/db/query/internal_runner.cpp
@@ -42,13 +42,14 @@ namespace mongo {
InternalRunner::InternalRunner(const Collection* collection, PlanStage* root, WorkingSet* ws)
: _collection(collection),
- _exec(new PlanExecutor(ws, root, collection)),
- _policy(Runner::YIELD_MANUAL) {
+ _exec(new PlanExecutor(ws, root, collection)) {
+
+ _collection->cursorCache()->registerRunner(this);
invariant( collection );
}
InternalRunner::~InternalRunner() {
- if (Runner::YIELD_AUTO == _policy && _collection) {
+ if (_collection) {
_collection->cursorCache()->deregisterRunner(this);
}
}
@@ -77,25 +78,6 @@ namespace mongo {
_exec->invalidate(dl, type);
}
- void InternalRunner::setYieldPolicy(Runner::YieldPolicy policy) {
- // No-op.
- if (_policy == policy) { return; }
-
- invariant( _collection );
-
- if (Runner::YIELD_AUTO == policy) {
- // Going from manual to auto.
- _collection->cursorCache()->registerRunner(this);
- }
- else {
- // Going from auto to manual.
- _collection->cursorCache()->deregisterRunner(this);
- }
-
- _policy = policy;
- _exec->setYieldPolicy(policy);
- }
-
void InternalRunner::kill() {
_exec->kill();
_collection = NULL;
diff --git a/src/mongo/db/query/internal_runner.h b/src/mongo/db/query/internal_runner.h
index b2147bbc529..5b7c7613022 100644
--- a/src/mongo/db/query/internal_runner.h
+++ b/src/mongo/db/query/internal_runner.h
@@ -74,8 +74,6 @@ namespace mongo {
virtual void invalidate(const DiskLoc& dl, InvalidationType type);
- virtual void setYieldPolicy(Runner::YieldPolicy policy);
-
virtual void kill();
virtual const Collection* collection() { return _collection; }
@@ -97,7 +95,6 @@ namespace mongo {
const Collection* _collection;
boost::scoped_ptr<PlanExecutor> _exec;
- Runner::YieldPolicy _policy;
};
} // namespace mongo
diff --git a/src/mongo/db/query/multi_plan_runner.cpp b/src/mongo/db/query/multi_plan_runner.cpp
index b544efa1e23..99d3a144a99 100644
--- a/src/mongo/db/query/multi_plan_runner.cpp
+++ b/src/mongo/db/query/multi_plan_runner.cpp
@@ -56,7 +56,6 @@ namespace mongo {
_killed(false),
_failure(false),
_failureCount(0),
- _policy(Runner::YIELD_MANUAL),
_query(query),
_bestChild(numeric_limits<size_t>::max()),
_backupSolution(NULL),
@@ -89,27 +88,6 @@ namespace mongo {
_candidates.push_back(CandidatePlan(solution, root, ws));
}
- void MultiPlanRunner::setYieldPolicy(Runner::YieldPolicy policy) {
- if (_failure || _killed) { return; }
-
- _policy = policy;
-
- if (NULL != _bestPlan) {
- _bestPlan->setYieldPolicy(policy);
- if (NULL != _backupPlan) {
- _backupPlan->setYieldPolicy(policy);
- }
- } else {
- // Still running our candidates and doing our own yielding.
- if (Runner::YIELD_MANUAL == policy) {
- _yieldPolicy.reset();
- }
- else {
- _yieldPolicy.reset(new RunnerYieldPolicy());
- }
- }
- }
-
void MultiPlanRunner::saveState() {
if (_failure || _killed) { return; }
@@ -363,7 +341,6 @@ namespace mongo {
_bestPlan.reset(new PlanExecutor(_candidates[_bestChild].ws,
_candidates[_bestChild].root,
_collection));
- _bestPlan->setYieldPolicy(_policy);
_alreadyProduced = _candidates[_bestChild].results;
_bestSolution.reset(_candidates[_bestChild].solution);
@@ -382,7 +359,6 @@ namespace mongo {
_backupPlan = new PlanExecutor(_candidates[i].ws,
_candidates[i].root,
_collection);
- _backupPlan->setYieldPolicy(_policy);
break;
}
}
@@ -467,14 +443,6 @@ namespace mongo {
CandidatePlan& candidate = _candidates[i];
if (candidate.failed) { continue; }
- // Yield, if we can yield ourselves.
- if (NULL != _yieldPolicy.get() && _yieldPolicy->shouldYield()) {
- saveState();
- _yieldPolicy->yield();
- if (_failure || _killed) { return false; }
- restoreState();
- }
-
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = candidate.root->work(&id);
diff --git a/src/mongo/db/query/multi_plan_runner.h b/src/mongo/db/query/multi_plan_runner.h
index 01dbc3091c3..5da33d74653 100644
--- a/src/mongo/db/query/multi_plan_runner.h
+++ b/src/mongo/db/query/multi_plan_runner.h
@@ -36,7 +36,6 @@
#include "mongo/db/exec/working_set.h"
#include "mongo/db/query/plan_ranker.h" // for CandidatePlan
#include "mongo/db/query/runner.h"
-#include "mongo/db/query/runner_yield_policy.h"
namespace mongo {
@@ -103,8 +102,6 @@ namespace mongo {
virtual bool restoreState();
virtual void invalidate(const DiskLoc& dl, InvalidationType type);
- virtual void setYieldPolicy(Runner::YieldPolicy policy);
-
virtual const std::string& ns();
virtual void kill();
@@ -146,10 +143,6 @@ namespace mongo {
// If everything fails during the plan competition, we can't pick one.
size_t _failureCount;
- // We need to cache this so that when we switch from running our candidates to using a
- // PlanExecutor, we can set the right yielding policy on it.
- Runner::YieldPolicy _policy;
-
// The winner of the plan competition...
boost::scoped_ptr<PlanExecutor> _bestPlan;
@@ -165,9 +158,6 @@ namespace mongo {
// Candidate plans' stats. Owned here.
std::vector<PlanStageStats*> _candidateStats;
- // Yielding policy we use when we're running candidates.
- boost::scoped_ptr<RunnerYieldPolicy> _yieldPolicy;
-
// The query that we're trying to figure out the best solution to.
boost::scoped_ptr<CanonicalQuery> _query;
diff --git a/src/mongo/db/query/new_find.cpp b/src/mongo/db/query/new_find.cpp
index c840749a3a5..40990a0baa6 100644
--- a/src/mongo/db/query/new_find.cpp
+++ b/src/mongo/db/query/new_find.cpp
@@ -367,7 +367,6 @@ namespace mongo {
// Takes ownership of ws and stage.
auto_ptr<InternalRunner> runner(new InternalRunner(collection, stage, oplogws));
- runner->setYieldPolicy(Runner::YIELD_AUTO);
// The stage returns a DiskLoc of where to start.
DiskLoc startLoc;
@@ -548,7 +547,6 @@ namespace mongo {
// We turn on auto-yielding for the runner here. The runner registers itself with the
// active runners list in ClientCursor.
auto_ptr<ScopedRunnerRegistration> safety(new ScopedRunnerRegistration(runner.get()));
- runner->setYieldPolicy(Runner::YIELD_AUTO);
BSONObj obj;
Runner::RunnerState state;
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 48f1c3f5bf7..76ff61e62f2 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -63,27 +63,10 @@ namespace mongo {
if (!_killed) { _root->invalidate(dl, type); }
}
- void PlanExecutor::setYieldPolicy(Runner::YieldPolicy policy) {
- if (Runner::YIELD_MANUAL == policy) {
- _yieldPolicy.reset();
- }
- else {
- _yieldPolicy.reset(new RunnerYieldPolicy());
- }
- }
-
Runner::RunnerState PlanExecutor::getNext(BSONObj* objOut, DiskLoc* dlOut) {
if (_killed) { return Runner::RUNNER_DEAD; }
for (;;) {
- // Yield, if we can yield ourselves.
- if (NULL != _yieldPolicy.get() && _yieldPolicy->shouldYield()) {
- saveState();
- _yieldPolicy->yield();
- if (_killed) { return Runner::RUNNER_DEAD; }
- restoreState();
- }
-
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState code = _root->work(&id);
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index f2382423d02..4c8c6fd97ad 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -32,7 +32,6 @@
#include "mongo/base/status.h"
#include "mongo/db/query/runner.h"
-#include "mongo/db/query/runner_yield_policy.h"
namespace mongo {
@@ -83,9 +82,6 @@ namespace mongo {
//
/** TODO document me */
- void setYieldPolicy(Runner::YieldPolicy policy);
-
- /** TODO document me */
Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut);
/** TOOD document me */
@@ -105,7 +101,6 @@ namespace mongo {
boost::scoped_ptr<WorkingSet> _workingSet;
boost::scoped_ptr<PlanStage> _root;
- boost::scoped_ptr<RunnerYieldPolicy> _yieldPolicy;
// Did somebody drop an index we care about or the namespace we're looking at? If so,
// we'll be killed.
diff --git a/src/mongo/db/query/runner.h b/src/mongo/db/query/runner.h
index 4a43fd9a5d0..474b93c1fd3 100644
--- a/src/mongo/db/query/runner.h
+++ b/src/mongo/db/query/runner.h
@@ -81,53 +81,6 @@ namespace mongo {
}
/**
- * The yielding policy of the runner. By default, a runner does not yield itself
- * (YIELD_MANUAL).
- */
- enum YieldPolicy {
- // Any call to getNext() may yield. In particular, the runner may be killed during any
- // call to getNext(). If this occurs, getNext() will return RUNNER_DEAD.
- //
- // If you are enabling autoyield, you must register the Runner with ClientCursor via
- // ClientCursor::registerRunner and deregister via ClientCursor::deregisterRunnerwhen
- // done. Registered runners are informed about DiskLoc deletions and Namespace
- // invalidations and other important events.
- //
- // Exception: This is not required if the Runner is cached inside of a ClientCursor.
- // This is only done if the Runner is cached and can be referred to by a cursor id.
- // This is not a popular thing to do.
- YIELD_AUTO,
-
- // Owner must yield manually if yields are requested. How to yield yourself:
- //
- // 0. Let's say you have Runner* runner.
- //
- // 1. Register your runner with ClientCursor. Registered runners are informed about
- // DiskLoc deletions and Namespace invalidation and other important events. Do this by
- // calling ClientCursor::registerRunner(runner). This could be done once when you get
- // your runner, or per-yield.
- //
- // 2. Call runner->saveState() before you yield.
- //
- // 3. Call RunnerYieldPolicy::staticYield(runner->ns(), NULL) to yield. Any state that
- // may change between yields must be checked by you. (For example, DiskLocs may not be
- // valid across yielding, indices may be dropped, etc.)
- //
- // 4. Call runner->restoreState() before using the runner again.
- //
- // 5. Your runner's next call to getNext may return RUNNER_DEAD.
- //
- // 6. When you're done with your runner, deregister it from ClientCursor via
- // ClientCursor::deregister(runner).
- YIELD_MANUAL,
- };
-
- /**
- * Set the yielding policy of the underlying runner. See the RunnerYieldPolicy enum above.
- */
- virtual void setYieldPolicy(YieldPolicy policy) = 0;
-
- /**
* Get the next result from the query.
*
* If objOut is not NULL, only results that have a BSONObj are returned. The BSONObj may
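
The enum comment deleted above doubles as a how-to for YIELD_MANUAL callers. The sketch below restates those numbered steps as code; it is stitched together from the deleted comment and headers touched elsewhere in this diff, and the exact registration and staticYield signatures drifted over time, so the calls are illustrative assumptions rather than a verified excerpt:

    // Illustrative restatement of the deleted YIELD_MANUAL steps.
    Runner* runner = /* step 0: obtained elsewhere, e.g. via getRunner() */ NULL;

    ClientCursor::registerRunner(runner);             // 1. register for invalidation events
    runner->saveState();                              // 2. save state before giving up the lock
    int micros = ClientCursor::suggestYieldMicros();  //    (also removed by this commit)
    ClientCursor::staticYield(micros, runner->ns());  // 3. yield; re-validate DiskLocs/indexes after
    bool ok = runner->restoreState();                 // 4. restore before using the runner again
    // 5. the next getNext() may now return RUNNER_DEAD
    ClientCursor::deregisterRunner(runner);           // 6. deregister when done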
diff --git a/src/mongo/db/query/runner_yield_policy.h b/src/mongo/db/query/runner_yield_policy.h
deleted file mode 100644
index ee46e412953..00000000000
--- a/src/mongo/db/query/runner_yield_policy.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Copyright (C) 2013 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include "mongo/db/clientcursor.h"
-#include "mongo/db/catalog/collection.h"
-#include "mongo/util/elapsed_tracker.h"
-
-namespace mongo {
-
- class RunnerYieldPolicy {
- public:
- RunnerYieldPolicy() : _elapsedTracker(128, 10), _runnerYielding(NULL) { }
-
- ~RunnerYieldPolicy() {
- if (NULL != _runnerYielding) {
- // We were destructed mid-yield. Since we're being used to yield a runner, we have
- // to deregister the runner.
- if ( _runnerYielding->collection() ) {
- _runnerYielding->collection()->cursorCache()->deregisterRunner(_runnerYielding);
- }
- }
- }
-
- bool shouldYield() {
- return _elapsedTracker.intervalHasElapsed();
- }
-
- /**
- * Yield the provided runner, registering and deregistering it appropriately. Deal with
- * deletion during a yield by setting _runnerYielding to ensure deregistration.
- *
- * Provided runner MUST be YIELD_MANUAL.
- */
- bool yieldAndCheckIfOK(Runner* runner) {
- invariant(runner);
- invariant(runner->collection());
-
- int micros = ClientCursor::suggestYieldMicros();
-
- // If micros is not positive, no point in yielding, nobody waiting.
- //
- // TODO: Track how many times we actually yield, how many times micros is <0, etc.
- if (micros <= 0) { return true; }
-
- // If micros > 0, we should yield.
- runner->saveState();
- _runnerYielding = runner;
-
- runner->collection()->cursorCache()->registerRunner( _runnerYielding );
-
- // Note that this call checks for interrupt, and thus can throw if interrupt flag is set
- staticYield(micros);
-
- if ( runner->collection() ) {
- // if the runner was killed, runner->collection() will return NULL
- // so we don't deregister as it was done when killed
- runner->collection()->cursorCache()->deregisterRunner( _runnerYielding );
- }
- _runnerYielding = NULL;
- _elapsedTracker.resetLastTime();
- return runner->restoreState();
- }
-
- /**
- * Yield. Caller is in charge of all runner registration.
- *
- * Used for YIELD_AUTO runners.
- */
- void yield() {
- int micros = ClientCursor::suggestYieldMicros();
-
- if (micros > 0) {
- staticYield(micros);
- // TODO: When do we really want to reset this? Currently we reset it when we
- // actually yield. As such we'll keep on trying to yield once the tracker has
- // elapsed. If we reset it even if we don't yield, we'll wait until the time
- // interval elapses again to try yielding.
- _elapsedTracker.resetLastTime();
- }
- }
-
- static void staticYield(int micros) {
- ClientCursor::staticYield(micros, "");
- }
-
- private:
- ElapsedTracker _elapsedTracker;
- Runner* _runnerYielding;
- };
-
-} // namespace mongo
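
The header deleted above had essentially one driver: the background index build loop whose old form appears in the index_create.cpp hunk near the top of this diff. A condensed sketch of that usage, with the key-insertion body elided (not a compilable excerpt):

    // How RunnerYieldPolicy was consumed before this commit, per the deleted
    // index_create.cpp code.
    RunnerYieldPolicy yieldPolicy;
    BSONObj obj;
    DiskLoc loc;
    while (Runner::RUNNER_ADVANCED == runner->getNext(&obj, &loc)) {
        // ... build and insert the index key for obj ...
        if (shouldYield && yieldPolicy.shouldYield()) {
            // yieldAndCheckIfOK() saves runner state, registers the runner, sleeps
            // for ClientCursor::suggestYieldMicros(), then restores; false means the
            // underlying cursor went away while the lock was released.
            if (!yieldPolicy.yieldAndCheckIfOK(runner.get())) {
                uasserted(ErrorCodes::CursorNotFound, "cursor gone during bg index");
            }
        }
    }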
diff --git a/src/mongo/db/query/single_solution_runner.cpp b/src/mongo/db/query/single_solution_runner.cpp
index 05fdd079816..f6566a5000b 100644
--- a/src/mongo/db/query/single_solution_runner.cpp
+++ b/src/mongo/db/query/single_solution_runner.cpp
@@ -72,10 +72,6 @@ namespace mongo {
return _exec->restoreState();
}
- void SingleSolutionRunner::setYieldPolicy(Runner::YieldPolicy policy) {
- _exec->setYieldPolicy(policy);
- }
-
void SingleSolutionRunner::invalidate(const DiskLoc& dl, InvalidationType type) {
_exec->invalidate(dl, type);
}
diff --git a/src/mongo/db/query/single_solution_runner.h b/src/mongo/db/query/single_solution_runner.h
index 99e573a4846..bdd697538b4 100644
--- a/src/mongo/db/query/single_solution_runner.h
+++ b/src/mongo/db/query/single_solution_runner.h
@@ -67,8 +67,6 @@ namespace mongo {
virtual bool restoreState();
- virtual void setYieldPolicy(Runner::YieldPolicy policy);
-
virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual const std::string& ns();
diff --git a/src/mongo/db/query/subplan_runner.cpp b/src/mongo/db/query/subplan_runner.cpp
index 7360cf298dc..da39ec0eb8e 100644
--- a/src/mongo/db/query/subplan_runner.cpp
+++ b/src/mongo/db/query/subplan_runner.cpp
@@ -29,6 +29,7 @@
#include "mongo/db/query/subplan_runner.h"
#include "mongo/client/dbclientinterface.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/diskloc.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/query/canonical_query.h"
@@ -113,7 +114,6 @@ namespace mongo {
_plannerParams(params),
_query(cq),
_killed(false),
- _policy(Runner::YIELD_MANUAL),
_ns(cq->getParsed().ns()) { }
SubplanRunner::~SubplanRunner() {
@@ -167,7 +167,6 @@ namespace mongo {
}
else {
_underlyingRunner.reset(runner);
- _underlyingRunner->setYieldPolicy(_policy);
}
}
@@ -318,9 +317,6 @@ namespace mongo {
mpr->addPlan(solutions[i], root, ws);
}
- // If we're allowed to yield, let the MPR know.
- mpr->setYieldPolicy(_policy);
-
// Calling pickBestPlan can yield so we must propagate events down to the MPR.
_underlyingRunner.reset(mpr);
@@ -404,7 +400,6 @@ namespace mongo {
// Takes ownership of all arguments.
mpr->addPlan(soln, root, ws);
- mpr->setYieldPolicy(_policy);
_underlyingRunner.reset(mpr);
return true;
@@ -451,18 +446,6 @@ namespace mongo {
return true;
}
- void SubplanRunner::setYieldPolicy(Runner::YieldPolicy policy) {
- if (_killed) { return; }
-
- // If somebody sets this before calling work() we need to know how to set it in our subquery
- // runners.
- _policy = policy;
-
- if (NULL != _underlyingRunner.get()) {
- _underlyingRunner->setYieldPolicy(policy);
- }
- }
-
void SubplanRunner::invalidate(const DiskLoc& dl, InvalidationType type) {
if (_killed) { return; }
diff --git a/src/mongo/db/query/subplan_runner.h b/src/mongo/db/query/subplan_runner.h
index 7415b50c934..3cefb07d8be 100644
--- a/src/mongo/db/query/subplan_runner.h
+++ b/src/mongo/db/query/subplan_runner.h
@@ -70,8 +70,6 @@ namespace mongo {
virtual bool restoreState();
- virtual void setYieldPolicy(Runner::YieldPolicy policy);
-
virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual const std::string& ns();
@@ -117,8 +115,6 @@ namespace mongo {
bool _killed;
- Runner::YieldPolicy _policy;
-
boost::scoped_ptr<Runner> _underlyingRunner;
std::string _ns;
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index c69efefbe11..3fb794eff61 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -215,45 +215,6 @@ namespace CountTests {
boost::thread _dummyWriter;
};
- /**
- * The runCount() function yields deterministically with sufficient cursor iteration and a
- * mutually exclusive thread awaiting its mutex. SERVER-5428
- */
- class Yield : public Base {
- public:
- void run() {
- // Insert enough documents that counting them will exceed the iteration threshold
- // to trigger a yield.
- for( int i = 0; i < 1000; ++i ) {
- insert( BSON( "a" << 1 ) );
- }
-
- // Call runCount() under a read lock.
- dbtemprelease release;
- Client::ReadContext ctx( ns() );
-
- int numYieldsBeforeCount = numYields();
-
- string err;
- int errCode;
- ASSERT_EQUALS( 1000, runCount( ns(), countCommand( BSON( "a" << 1 ) ), err, errCode ) );
- ASSERT_EQUALS( "", err );
-
- int numYieldsAfterCount = numYields();
- int numYieldsDuringCount = numYieldsAfterCount - numYieldsBeforeCount;
-
- // The runCount() function yieled.
- ASSERT_NOT_EQUALS( 0, numYieldsDuringCount );
- ASSERT( 0 < numYieldsDuringCount );
- }
- private:
- int numYields() const {
- return cc().curop()->info()[ "numYields" ].Int();
- }
- // A writer client is registered while the test runs, causing runCount() to yield.
- WriterClientScope _writer;
- };
-
class All : public Suite {
public:
All() : Suite( "count" ) {
@@ -265,7 +226,6 @@ namespace CountTests {
add<Fields>();
add<QueryFields>();
add<IndexedRegex>();
- add<Yield>();
}
} myall;
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index 607ee1001c0..eb76afaebe2 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -172,7 +172,6 @@ namespace DocumentSourceTests {
uassertStatusOK(getRunner(ctx.ctx().db()->getCollection(ns), cq, &runnerBare));
_runner.reset(runnerBare);
- _runner->setYieldPolicy(Runner::YIELD_AUTO);
_runner->saveState();
_registration.reset(new ScopedRunnerRegistration(_runner.get()));
@@ -317,27 +316,6 @@ namespace DocumentSourceTests {
boost::thread _dummyWriter;
};
- /** DocumentSourceCursor yields deterministically when enough documents are scanned. */
- class Yield : public Base {
- public:
- void run() {
- // Insert enough documents that counting them will exceed the iteration threshold
- // to trigger a yield.
- for( int i = 0; i < 1000; ++i ) {
- client.insert( ns, BSON( "a" << 1 ) );
- }
- createSource();
- ASSERT_EQUALS( 0, cc().curop()->numYields() );
- // Iterate through all results.
- while( source()->getNext() );
- // The lock was yielded during iteration.
- ASSERT_GREATER_THAN(cc().curop()->numYields(), 0);
- }
- private:
- // An active writer is required to trigger yielding.
- WriterClientScope _writerScope;
- };
-
/** Test coalescing a limit into a cursor */
class LimitCoalesce : public Base {
public:
@@ -1931,7 +1909,6 @@ namespace DocumentSourceTests {
add<DocumentSourceCursor::Iterate>();
add<DocumentSourceCursor::Dispose>();
add<DocumentSourceCursor::IterateDispose>();
- add<DocumentSourceCursor::Yield>();
add<DocumentSourceCursor::LimitCoalesce>();
add<DocumentSourceLimit::DisposeSource>();
diff --git a/src/mongo/dbtests/query_single_solution_runner.cpp b/src/mongo/dbtests/query_single_solution_runner.cpp
index 9a1774b61c8..30d7ca0535f 100644
--- a/src/mongo/dbtests/query_single_solution_runner.cpp
+++ b/src/mongo/dbtests/query_single_solution_runner.cpp
@@ -27,6 +27,7 @@
*/
#include "mongo/db/clientcursor.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/exec/collection_scan.h"
#include "mongo/db/exec/fetch.h"
@@ -189,10 +190,7 @@ namespace QuerySingleSolutionRunner {
BSONObj filterObj = fromjson("{_id: {$gt: 0}}");
scoped_ptr<SingleSolutionRunner> ssr(makeCollScanRunner(ctx.ctx(),filterObj));
-
- // Set up autoyielding.
registerRunner(ssr.get());
- ssr->setYieldPolicy(Runner::YIELD_AUTO);
BSONObj objOut;
ASSERT_EQUALS(Runner::RUNNER_ADVANCED, ssr->getNext(&objOut, NULL));
@@ -222,10 +220,7 @@ namespace QuerySingleSolutionRunner {
addIndex(indexSpec);
scoped_ptr<SingleSolutionRunner> ssr(makeIndexScanRunner(ctx.ctx(), indexSpec, 7, 10));
-
- // Set up autoyielding.
registerRunner(ssr.get());
- ssr->setYieldPolicy(Runner::YIELD_AUTO);
BSONObj objOut;
ASSERT_EQUALS(Runner::RUNNER_ADVANCED, ssr->getNext(&objOut, NULL));
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 713bc0177bb..b884392dfc9 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -31,6 +31,7 @@
*/
#include "mongo/client/dbclientcursor.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/exec/collection_scan.h"
#include "mongo/db/exec/plan_stage.h"
diff --git a/src/mongo/dbtests/runner_registry.cpp b/src/mongo/dbtests/runner_registry.cpp
index 4a079fb1b93..7071aafb3b0 100644
--- a/src/mongo/dbtests/runner_registry.cpp
+++ b/src/mongo/dbtests/runner_registry.cpp
@@ -32,6 +32,7 @@
*/
#include "mongo/client/dbclientcursor.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/exec/collection_scan.h"
#include "mongo/db/exec/plan_stage.h"
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 5b498bb5be6..29f06ab5f90 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -424,9 +424,6 @@ namespace mongo {
BSONObj max = Helpers::toKeyFormat( kp.extendRangeBound( _max, false ) );
auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx, min, max, false));
- // we can afford to yield here because any change to the base data that we might miss is
- // already being queued and will be migrated in the 'transferMods' stage
- runner->setYieldPolicy(Runner::YIELD_AUTO);
// use the average object size to estimate how many objects a full chunk would carry
// do that while traversing the chunk's range using the sharding index, below
@@ -630,9 +627,6 @@ namespace mongo {
invariant( _collection == collection );
_collection->cursorCache()->deregisterRunner( this );
}
- virtual void setYieldPolicy(YieldPolicy policy) {
- invariant( false );
- }
virtual RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut) {
invariant( false );
}
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 26b6f2a8bb3..528f0067c6d 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -147,8 +147,6 @@ namespace mongo {
auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx, min, max,
false, InternalPlanner::FORWARD));
- runner->setYieldPolicy(Runner::YIELD_AUTO);
-
// Find the 'missingField' value used to represent a missing document field in a key of
// this index.
// NOTE A local copy of 'missingField' is made because indices may be
@@ -393,7 +391,6 @@ namespace mongo {
set<BSONObj> tooFrequentKeys;
splitKeys.push_back(prettyKey(idx->keyPattern(), currKey.getOwned()).extractFields( keyPattern ) );
- runner->setYieldPolicy(Runner::YIELD_AUTO);
while ( 1 ) {
while (Runner::RUNNER_ADVANCED == state) {
currCount++;
@@ -439,7 +436,6 @@ namespace mongo {
runner.reset(InternalPlanner::indexScan(collection, idx, min, max,
false, InternalPlanner::FORWARD));
- runner->setYieldPolicy(Runner::YIELD_AUTO);
state = runner->getNext(&currKey, NULL);
}