-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp  12
-rw-r--r--  src/mongo/db/cursor_manager.cpp  27
-rw-r--r--  src/mongo/db/cursor_manager.h  17
-rw-r--r--  src/mongo/db/exec/and_hash.cpp  51
-rw-r--r--  src/mongo/db/exec/and_hash.h  13
-rw-r--r--  src/mongo/db/exec/and_sorted.cpp  29
-rw-r--r--  src/mongo/db/exec/and_sorted.h  14
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp  20
-rw-r--r--  src/mongo/db/exec/cached_plan.h  6
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp  35
-rw-r--r--  src/mongo/db/exec/collection_scan.h  1
-rw-r--r--  src/mongo/db/exec/collection_scan_common.h  3
-rw-r--r--  src/mongo/db/exec/count_scan.cpp  16
-rw-r--r--  src/mongo/db/exec/count_scan.h  3
-rw-r--r--  src/mongo/db/exec/delete.cpp  6
-rw-r--r--  src/mongo/db/exec/fetch.cpp  12
-rw-r--r--  src/mongo/db/exec/fetch.h  1
-rw-r--r--  src/mongo/db/exec/geo_near.cpp  14
-rw-r--r--  src/mongo/db/exec/idhack.cpp  17
-rw-r--r--  src/mongo/db/exec/idhack.h  1
-rw-r--r--  src/mongo/db/exec/index_scan.cpp  16
-rw-r--r--  src/mongo/db/exec/index_scan.h  1
-rw-r--r--  src/mongo/db/exec/merge_sort.cpp  23
-rw-r--r--  src/mongo/db/exec/merge_sort.h  65
-rw-r--r--  src/mongo/db/exec/multi_iterator.cpp  15
-rw-r--r--  src/mongo/db/exec/multi_iterator.h  1
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp  45
-rw-r--r--  src/mongo/db/exec/multi_plan.h  2
-rw-r--r--  src/mongo/db/exec/near.cpp  28
-rw-r--r--  src/mongo/db/exec/near.h  25
-rw-r--r--  src/mongo/db/exec/or.cpp  17
-rw-r--r--  src/mongo/db/exec/or.h  8
-rw-r--r--  src/mongo/db/exec/pipeline_proxy.h  6
-rw-r--r--  src/mongo/db/exec/plan_stage.cpp  9
-rw-r--r--  src/mongo/db/exec/plan_stage.h  21
-rw-r--r--  src/mongo/db/exec/plan_stats.h  78
-rw-r--r--  src/mongo/db/exec/projection.cpp  5
-rw-r--r--  src/mongo/db/exec/queued_data_stage_test.cpp  5
-rw-r--r--  src/mongo/db/exec/sort.cpp  48
-rw-r--r--  src/mongo/db/exec/sort.h  6
-rw-r--r--  src/mongo/db/exec/subplan.cpp  6
-rw-r--r--  src/mongo/db/exec/text_or.cpp  11
-rw-r--r--  src/mongo/db/exec/text_or.h  1
-rw-r--r--  src/mongo/db/exec/update.cpp  6
-rw-r--r--  src/mongo/db/exec/working_set.cpp  2
-rw-r--r--  src/mongo/db/exec/working_set.h  8
-rw-r--r--  src/mongo/db/exec/working_set_common.cpp  31
-rw-r--r--  src/mongo/db/exec/working_set_common.h  9
-rw-r--r--  src/mongo/db/exec/write_stage_common.cpp  7
-rw-r--r--  src/mongo/db/invalidation_type.h  42
-rw-r--r--  src/mongo/db/query/explain.cpp  8
-rw-r--r--  src/mongo/db/query/plan_executor.cpp  6
-rw-r--r--  src/mongo/db/query/plan_executor.h  18
-rw-r--r--  src/mongo/db/query/plan_ranker.h  4
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.h  1
-rw-r--r--  src/mongo/db/storage/mobile/mobile_record_store.cpp  5
-rw-r--r--  src/mongo/db/storage/record_store.h  24
-rw-r--r--  src/mongo/db/storage/record_store_test_recorditer.cpp  41
-rw-r--r--  src/mongo/db/storage/record_store_test_repairiter.cpp  49
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp  18
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp  4
-rw-r--r--  src/mongo/dbtests/query_stage_delete.cpp  67
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp  46
-rw-r--r--  src/mongo/dbtests/query_stage_near.cpp  4
-rw-r--r--  src/mongo/dbtests/query_stage_update.cpp  77
65 files changed, 218 insertions, 999 deletions
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index b5a0549a20c..e465d5839c3 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -513,9 +513,6 @@ void CollectionImpl::notifyCappedWaitersIfNeeded() {
Status CollectionImpl::aboutToDeleteCapped(OperationContext* opCtx,
const RecordId& loc,
RecordData data) {
- /* check if any cursors point to us. if so, advance them. */
- _cursorManager.invalidateDocument(opCtx, loc, INVALIDATION_DELETION);
-
BSONObj doc = data.releaseToBson();
int64_t* const nullKeysDeleted = nullptr;
_indexCatalog.unindexRecord(opCtx, doc, loc, false, nullKeysDeleted);
@@ -550,9 +547,6 @@ void CollectionImpl::deleteDocument(OperationContext* opCtx,
deletedDoc.emplace(doc.value().getOwned());
}
- /* check if any cursors point to us. if so, advance them. */
- _cursorManager.invalidateDocument(opCtx, loc, INVALIDATION_DELETION);
-
int64_t keysDeleted;
_indexCatalog.unindexRecord(opCtx, doc.value(), loc, noWarn, &keysDeleted);
if (opDebug) {
@@ -684,12 +678,9 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
Status CollectionImpl::recordStoreGoingToUpdateInPlace(OperationContext* opCtx,
const RecordId& loc) {
- // Broadcast the mutation so that query results stay correct.
- _cursorManager.invalidateDocument(opCtx, loc, INVALIDATION_MUTATION);
return Status::OK();
}
-
bool CollectionImpl::updateWithDamagesSupported() const {
if (_validator)
return false;
@@ -708,9 +699,6 @@ StatusWith<RecordData> CollectionImpl::updateDocumentWithDamages(
invariant(oldRec.snapshotId() == opCtx->recoveryUnit()->getSnapshotId());
invariant(updateWithDamagesSupported());
- // Broadcast the mutation so that query results stay correct.
- _cursorManager.invalidateDocument(opCtx, loc, INVALIDATION_MUTATION);
-
auto newRecStatus =
_recordStore->updateWithDamages(opCtx, loc, oldRec.value(), damageSource, damages);
diff --git a/src/mongo/db/cursor_manager.cpp b/src/mongo/db/cursor_manager.cpp
index 6cfc442ed67..b0537afcf95 100644
--- a/src/mongo/db/cursor_manager.cpp
+++ b/src/mongo/db/cursor_manager.cpp
@@ -484,33 +484,6 @@ void CursorManager::invalidateAll(OperationContext* opCtx,
}
}
-void CursorManager::invalidateDocument(OperationContext* opCtx,
- const RecordId& dl,
- InvalidationType type) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
- invariant(!isGlobalManager()); // The global cursor manager should never receive invalidations.
- if (supportsDocLocking()) {
- // If a storage engine supports doc locking, then we do not need to invalidate.
- // The transactional boundaries of the operation protect us.
- return;
- }
-
- auto allExecPartitions = _registeredPlanExecutors.lockAllPartitions();
- for (auto&& partition : allExecPartitions) {
- for (auto&& exec : partition) {
- exec->invalidate(opCtx, dl, type);
- }
- }
-
- auto allPartitions = _cursorMap->lockAllPartitions();
- for (auto&& partition : allPartitions) {
- for (auto&& entry : partition) {
- auto exec = entry.second->getExecutor();
- exec->invalidate(opCtx, dl, type);
- }
- }
-}
-
bool CursorManager::cursorShouldTimeout_inlock(const ClientCursor* cursor, Date_t now) {
if (cursor->isNoTimeout() || cursor->_operationUsingCursor) {
return false;
diff --git a/src/mongo/db/cursor_manager.h b/src/mongo/db/cursor_manager.h
index 24d83a6e981..5714816d450 100644
--- a/src/mongo/db/cursor_manager.h
+++ b/src/mongo/db/cursor_manager.h
@@ -34,7 +34,6 @@
#include "mongo/db/clientcursor.h"
#include "mongo/db/cursor_id.h"
#include "mongo/db/generic_cursor.h"
-#include "mongo/db/invalidation_type.h"
#include "mongo/db/kill_sessions.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/record_id.h"
@@ -125,12 +124,6 @@ public:
const std::string& reason);
/**
- * Broadcast a document invalidation to all relevant PlanExecutor(s). invalidateDocument
- * must be called *before* the provided RecordId is deleted or mutated.
- */
- void invalidateDocument(OperationContext* opCtx, const RecordId& dl, InvalidationType type);
-
- /**
* Destroys cursors that have been inactive for too long.
*
* Returns the number of cursors that were timed out.
@@ -138,10 +131,10 @@ public:
std::size_t timeoutCursors(OperationContext* opCtx, Date_t now);
/**
- * Register an executor so that it can be notified of deletions, invalidations, collection
- * drops, or the like during yields. Must be called before an executor yields. Registration
- * happens automatically for yielding PlanExecutors, so this should only be called by a
- * PlanExecutor itself. Returns a token that must be stored for use during deregistration.
+ * Register an executor so that it can be notified of events that cause the PlanExecutor to be
+ * killed. Must be called before an executor yields. Registration happens automatically for
+ * yielding PlanExecutors, so this should only be called by a PlanExecutor itself. Returns a
+ * token that must be stored for use during deregistration.
*/
Partitioned<stdx::unordered_set<PlanExecutor*>>::PartitionId registerExecutor(
PlanExecutor* exec);
@@ -284,7 +277,7 @@ private:
// pointers to PlanExecutors are unowned, and a PlanExecutor will notify the CursorManager when
// it is being destroyed. ClientCursors are owned by the CursorManager, except when they are in
// use by a ClientCursorPin. When in use by a pin, an unowned pointer remains to ensure they
- // still receive invalidations while in use.
+ // still receive kill notifications while in use.
//
// There are several mutexes at work to protect concurrent access to data structures managed by
// this cursor manager. The two registration data structures '_registeredPlanExecutors' and
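The removed invalidateDocument() walked both registration structures under lockAllPartitions(), which is the slow path of the partitioned layout described in the comment above. As a rough illustration only (a minimal sketch with invented names, not mongo's actual Partitioned<> template), the idea is to shard entries across independently locked buckets so the hot register/deregister path contends on a single mutex, while rare whole-structure sweeps lock every bucket:

#include <array>
#include <cstddef>
#include <functional>
#include <mutex>
#include <unordered_set>

// Hypothetical stand-in for Partitioned<stdx::unordered_set<T*>>.
template <typename T, std::size_t kNumPartitions = 16>
class PartitionedSet {
public:
    void insert(T* item) {
        Partition& part = partitionFor(item);
        std::lock_guard<std::mutex> lk(part.mutex);  // one bucket locked
        part.items.insert(item);
    }

    void erase(T* item) {
        Partition& part = partitionFor(item);
        std::lock_guard<std::mutex> lk(part.mutex);
        part.items.erase(item);
    }

    // The lockAllPartitions() analogue: lock every bucket, then visit all
    // entries, as the removed invalidateDocument() and invalidateAll() do.
    template <typename Visitor>
    void forEachLockingAll(Visitor visit) {
        std::array<std::unique_lock<std::mutex>, kNumPartitions> locks;
        for (std::size_t i = 0; i < kNumPartitions; ++i) {
            locks[i] = std::unique_lock<std::mutex>(_partitions[i].mutex);
        }
        for (auto& part : _partitions) {
            for (T* item : part.items) {
                visit(item);
            }
        }
    }

private:
    struct Partition {
        std::mutex mutex;
        std::unordered_set<T*> items;
    };

    Partition& partitionFor(T* item) {
        return _partitions[std::hash<T*>()(item) % kNumPartitions];
    }

    std::array<Partition, kNumPartitions> _partitions;
};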
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 0ae93e9ad3a..d2d0661ffb2 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -383,57 +383,6 @@ PlanStage::StageState AndHashStage::hashOtherChildren(WorkingSetID* out) {
}
}
-// TODO SERVER-16857: Delete this method, as the invalidation mechanism was only needed for the
-// MMAPv1 storage engine.
-void AndHashStage::doInvalidate(OperationContext* opCtx,
- const RecordId& dl,
- InvalidationType type) {
- // TODO remove this since calling isEOF is illegal inside of doInvalidate().
- if (isEOF()) {
- return;
- }
-
- // Invalidation can happen to our warmup results. If that occurs just
- // flag it and forget about it.
- for (size_t i = 0; i < _lookAheadResults.size(); ++i) {
- if (WorkingSet::INVALID_ID != _lookAheadResults[i]) {
- WorkingSetMember* member = _ws->get(_lookAheadResults[i]);
- if (member->hasRecordId() && member->recordId == dl) {
- WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
- _lookAheadResults[i] = WorkingSet::INVALID_ID;
- }
- }
- }
-
- // If it's a deletion, we have to forget about the RecordId, and since the AND-ing is by
- // RecordId we can't continue processing it even with the object.
- //
- // If it's a mutation the predicates implied by the AND-ing may no longer be true.
- //
- // So, we flag and try to pick it up later.
- DataMap::iterator it = _dataMap.find(dl);
- if (_dataMap.end() != it) {
- WorkingSetID id = it->second;
- WorkingSetMember* member = _ws->get(id);
- verify(member->recordId == dl);
-
- if (_hashingChildren) {
- ++_specificStats.flaggedInProgress;
- } else {
- ++_specificStats.flaggedButPassed;
- }
-
- // Update memory stats.
- _memUsage -= member->getMemUsage();
-
- // The RecordId is about to be invalidated. Fetch it and clear the RecordId.
- WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
-
- // And don't return it from this stage.
- _dataMap.erase(it);
- }
-}
-
unique_ptr<PlanStageStats> AndHashStage::getStats() {
_commonStats.isEOF = isEOF();
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index 92996adba31..fa558922223 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -40,15 +40,10 @@
namespace mongo {
/**
- * Reads from N children, each of which must have a valid RecordId. Uses a hash table to
- * intersect the outputs of the N children, and outputs the intersection.
+ * Reads from N children, each of which must have a valid RecordId. Uses a hash table to intersect
+ * the outputs of the N children based on their record ids, and outputs the intersection.
*
- * Preconditions: Valid RecordId. More than one child.
- *
- * Any RecordId that we keep a reference to that is invalidated before we are able to return it
- * is fetched and added to the WorkingSet as "flagged for further review." Because this stage
- * operates with RecordIds, we are unable to evaluate the AND for the invalidated RecordId, and it
- * must be fully matched later.
+ * Preconditions: Valid RecordId. More than one child.
*/
class AndHashStage final : public PlanStage {
public:
@@ -73,8 +68,6 @@ public:
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
-
StageType stageType() const final {
return STAGE_AND_HASH;
}
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index b5e3cfa24fd..d81e897cd68 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -227,35 +227,6 @@ PlanStage::StageState AndSortedStage::moveTowardTargetRecordId(WorkingSetID* out
}
}
-
-// TODO SERVER-16857: Delete this method, as the invalidation mechanism was only needed for the
-// MMAPv1 storage engine.
-void AndSortedStage::doInvalidate(OperationContext* opCtx,
- const RecordId& dl,
- InvalidationType type) {
- // TODO remove this since calling isEOF is illegal inside of doInvalidate().
- if (isEOF()) {
- return;
- }
-
- if (dl == _targetRecordId) {
- // We're in the middle of moving children forward until they hit _targetRecordId, which
- // is no longer a valid target. If it's a deletion we can't AND it with anything, if it's
- // a mutation the predicates implied by the AND may no longer be true. So no matter what,
- // fetch it, flag for review, and find another _targetRecordId.
- ++_specificStats.flagged;
-
- // The RecordId could still be a valid result so flag it and save it for later.
- WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, _ws->get(_targetId), _collection);
-
- _targetId = WorkingSet::INVALID_ID;
- _targetNode = numeric_limits<size_t>::max();
- _targetRecordId = RecordId();
- _workingTowardRep = std::queue<size_t>();
- }
-}
-
unique_ptr<PlanStageStats> AndSortedStage::getStats() {
_commonStats.isEOF = isEOF();
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index 28ba43b954a..889b38c69c5 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -39,16 +39,10 @@
namespace mongo {
/**
- * Reads from N children, each of which must have a valid RecordId. Assumes each child produces
- * RecordIds in sorted order. Outputs the intersection of the RecordIds outputted by the
- * children.
+ * Reads from N children, each of which must have a valid RecordId. Assumes each child produces
+ * RecordIds in sorted order. Outputs the intersection of the RecordIds outputted by the children.
*
- * Preconditions: Valid RecordId. More than one child.
- *
- * Any RecordId that we keep a reference to that is invalidated before we are able to return it
- * is fetched and added to the WorkingSet as "flagged for further review." Because this stage
- * operates with RecordIds, we are unable to evaluate the AND for the invalidated RecordId, and it
- * must be fully matched later.
+ * Preconditions: Valid RecordId. More than one child.
*/
class AndSortedStage final : public PlanStage {
public:
@@ -59,8 +53,6 @@ public:
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
-
StageType stageType() const final {
return STAGE_AND_SORTED;
}
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 7e59ea230c6..01ca4bd2e37 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -100,7 +100,7 @@ Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
WorkingSetMember* member = _ws->get(id);
// Ensure that the BSONObj underlying the WorkingSetMember is owned in case we yield.
member->makeObjOwnedIfNeeded();
- _results.push_back(id);
+ _results.push(id);
if (_results.size() >= numResults) {
// Once a plan returns enough results, stop working. Update cache with stats
@@ -194,7 +194,10 @@ Status CachedPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
// We're going to start over with a new plan. Clear out info from our old plan.
- _results.clear();
+ {
+ std::queue<WorkingSetID> emptyQueue;
+ _results.swap(emptyQueue);
+ }
_ws->clear();
_children.clear();
@@ -287,7 +290,7 @@ PlanStage::StageState CachedPlanStage::doWork(WorkingSetID* out) {
// First exhaust any results buffered during the trial period.
if (!_results.empty()) {
*out = _results.front();
- _results.pop_front();
+ _results.pop();
return PlanStage::ADVANCED;
}
@@ -295,17 +298,6 @@ PlanStage::StageState CachedPlanStage::doWork(WorkingSetID* out) {
return child()->work(out);
}
-void CachedPlanStage::doInvalidate(OperationContext* opCtx,
- const RecordId& dl,
- InvalidationType type) {
- for (auto it = _results.begin(); it != _results.end(); ++it) {
- WorkingSetMember* member = _ws->get(*it);
- if (member->hasRecordId() && member->recordId == dl) {
- WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
- }
- }
-}
-
std::unique_ptr<PlanStageStats> CachedPlanStage::getStats() {
_commonStats.isEOF = isEOF();
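One consequence of moving _results from std::list to std::queue (see the cached_plan.h hunk below) is visible in replan() above: std::queue has no clear(), so the buffer is emptied by swapping with a default-constructed queue. A minimal standalone sketch of the idiom:

#include <cassert>
#include <queue>

int main() {
    std::queue<int> results;
    results.push(1);
    results.push(2);

    // std::queue offers no clear(); swap with an empty queue and let the
    // temporary carry the old elements out of scope.
    {
        std::queue<int> emptyQueue;
        results.swap(emptyQueue);
    }  // emptyQueue, now holding the old elements, is destroyed here

    assert(results.empty());
    return 0;
}

The container change itself fits the access pattern: trial-period results are only pushed at the back and consumed from the front, and with the removed doInvalidate() gone nothing needs to iterate the middle of the buffer anymore.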
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index 9d149860811..e8de18d7bca 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -28,8 +28,8 @@
#pragma once
-#include <list>
#include <memory>
+#include <queue>
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/working_set.h"
@@ -65,8 +65,6 @@ public:
StageState doWork(WorkingSetID* out) final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
-
StageType stageType() const final {
return STAGE_CACHED_PLAN;
}
@@ -136,7 +134,7 @@ private:
std::unique_ptr<QuerySolution> _replannedQs;
// Any results produced during trial period execution are kept here.
- std::list<WorkingSetID> _results;
+ std::queue<WorkingSetID> _results;
// When a stage requests a yield for document fetch, it gives us back a RecordFetcher*
// to use to pull the record into memory. We take ownership of the RecordFetcher here,
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index 162caf4cf3e..e2c51ea7304 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -121,12 +121,12 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
if (!_lastSeenId.isNull()) {
invariant(_params.tailable);
- // Seek to where we were last time. If it no longer exists, mark us as dead
- // since we want to signal an error rather than silently dropping data from the
- // stream. This is related to the _lastSeenId handling in invalidate. Note that
- // we want to return the record *after* this one since we have already returned
- // this one. This is only possible in the tailing case because that is the only
- // time we'd need to create a cursor after already getting a record out of it.
+ // Seek to where we were last time. If it no longer exists, mark us as dead since we
+ // want to signal an error rather than silently dropping data from the stream.
+ //
+ // Note that we want to return the record *after* this one since we have already
+ // returned this one. This is only possible in the tailing case because that is the
+ // only time we'd need to create a cursor after already getting a record out of it.
if (!_cursor->seekExact(_lastSeenId)) {
_isDead = true;
Status status(ErrorCodes::CappedPositionLost,
@@ -234,29 +234,6 @@ bool CollectionScan::isEOF() {
return _commonStats.isEOF || _isDead;
}
-void CollectionScan::doInvalidate(OperationContext* opCtx,
- const RecordId& id,
- InvalidationType type) {
- // We don't care about mutations since we apply any filters to the result when we (possibly)
- // return it.
- if (INVALIDATION_DELETION != type) {
- return;
- }
-
- // If we're here, 'id' is being deleted.
-
- // Deletions can harm the underlying RecordCursor so we must pass them down.
- if (_cursor) {
- _cursor->invalidate(opCtx, id);
- }
-
- if (_params.tailable && id == _lastSeenId) {
- // This means that deletes have caught up to the reader. We want to error in this case
- // so readers don't miss potentially important data.
- _isDead = true;
- }
-}
-
void CollectionScan::doSaveState() {
if (_cursor) {
_cursor->save();
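The rewritten comment above describes the tailable-scan resume step. Roughly, and against a hypothetical pared-down cursor interface (seekExact()/next() here are modeled loosely on mongo's SeekableRecordCursor, not copied from it), the logic is:

// Minimal sketch of resuming a tailable scan after a save/restore cycle.
struct Record {};

struct SeekableCursor {
    virtual ~SeekableCursor() = default;
    // Positions the cursor on 'id'; returns false if that record is gone.
    virtual bool seekExact(long long id) = 0;
    virtual bool next(Record* out) = 0;
};

enum class ScanState { kAdvanced, kDead, kEOF };

ScanState resumeTailable(SeekableCursor* cursor, long long lastSeenId, Record* out) {
    if (!cursor->seekExact(lastSeenId)) {
        // The capped collection overwrote our position: data was lost from
        // the stream, so fail loudly (CappedPositionLost) rather than
        // silently skipping ahead.
        return ScanState::kDead;
    }
    // We already returned 'lastSeenId', so hand back the record after it.
    return cursor->next(out) ? ScanState::kAdvanced : ScanState::kEOF;
}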
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index 65ee541b6e5..1a997a01ac1 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -58,7 +58,6 @@ public:
StageState doWork(WorkingSetID* out) final;
bool isEOF() final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
void doSaveState() final;
void doRestoreState() final;
void doDetachFromOperationContext() final;
diff --git a/src/mongo/db/exec/collection_scan_common.h b/src/mongo/db/exec/collection_scan_common.h
index 50f3c25975e..302b38f131a 100644
--- a/src/mongo/db/exec/collection_scan_common.h
+++ b/src/mongo/db/exec/collection_scan_common.h
@@ -45,8 +45,7 @@ struct CollectionScanParams {
// not owned
const Collection* collection = nullptr;
- // isNull by default. If you specify any value for this, you're responsible for the RecordId
- // not being invalidated before the first call to work(...).
+ // The RecordId to which we should seek as the first document of the scan.
RecordId start;
// If present, the collection scan will stop and return EOF the first time it sees a document
diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp
index f6746dfa50d..a5132ffe9a8 100644
--- a/src/mongo/db/exec/count_scan.cpp
+++ b/src/mongo/db/exec/count_scan.cpp
@@ -94,7 +94,6 @@ CountScan::CountScan(OperationContext* opCtx, CountScanParams params, WorkingSet
/*compareFieldNames*/ false) <= 0);
}
-
PlanStage::StageState CountScan::doWork(WorkingSetID* out) {
if (_commonStats.isEOF)
return PlanStage::IS_EOF;
@@ -166,21 +165,6 @@ void CountScan::doReattachToOperationContext() {
_cursor->reattachToOperationContext(getOpCtx());
}
-void CountScan::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
- // The only state we're responsible for holding is what RecordIds to drop. If a document
- // mutates, the underlying index cursor will deal with it.
- if (INVALIDATION_MUTATION == type) {
- return;
- }
-
- // If we see this RecordId again, it may not be the same document it was before, so we want
- // to return it if we see it again.
- stdx::unordered_set<RecordId, RecordId::Hasher>::iterator it = _returned.find(dl);
- if (it != _returned.end()) {
- _returned.erase(it);
- }
-}
-
unique_ptr<PlanStageStats> CountScan::getStats() {
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, STAGE_COUNT_SCAN);
diff --git a/src/mongo/db/exec/count_scan.h b/src/mongo/db/exec/count_scan.h
index f3f84ff8d57..f9c69cdede1 100644
--- a/src/mongo/db/exec/count_scan.h
+++ b/src/mongo/db/exec/count_scan.h
@@ -114,7 +114,6 @@ public:
void doRestoreState() final;
void doDetachFromOperationContext() final;
void doReattachToOperationContext() final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_COUNT_SCAN;
@@ -136,7 +135,7 @@ private:
std::unique_ptr<SortedDataInterface::Cursor> _cursor;
// Could our index have duplicates? If so, we use _returned to dedup.
- bool _shouldDedup;
+ const bool _shouldDedup;
stdx::unordered_set<RecordId, RecordId::Hasher> _returned;
CountScanParams _params;
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 623535ef946..7c6fd19b5ed 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -157,11 +157,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) {
// We want to free this member when we return, unless we need to retry deleting or returning it.
ScopeGuard memberFreer = MakeGuard(&WorkingSet::free, _ws, id);
- if (!member->hasRecordId()) {
- // We expect to be here because of an invalidation causing a force-fetch.
- ++_specificStats.nInvalidateSkips;
- return PlanStage::NEED_TIME;
- }
+ invariant(member->hasRecordId());
RecordId recordId = member->recordId;
// Deletes can't have projections. This means that covering analysis will always add
// a fetch. We should always get fetched data, and never just key data.
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index 6895a58ca3b..001e7010c54 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -164,18 +164,6 @@ void FetchStage::doReattachToOperationContext() {
_cursor->reattachToOperationContext(getOpCtx());
}
-void FetchStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
- // It's possible that the recordId getting invalidated is the one we're about to
- // fetch. In this case we do a "forced fetch" and put the WSM in owned object state.
- if (WorkingSet::INVALID_ID != _idRetrying) {
- WorkingSetMember* member = _ws->get(_idRetrying);
- if (member->hasRecordId() && (member->recordId == dl)) {
- // Fetch it now and kill the recordId.
- WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
- }
- }
-}
-
PlanStage::StageState FetchStage::returnIfMatches(WorkingSetMember* member,
WorkingSetID memberID,
WorkingSetID* out) {
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index a1c78c970c6..02c143a2c33 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -64,7 +64,6 @@ public:
void doRestoreState() final;
void doDetachFromOperationContext() final;
void doReattachToOperationContext() final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_FETCH;
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index 4e19b43a662..80923c4f55e 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -739,11 +739,8 @@ StatusWith<NearStage::CoveredInterval*> //
_children.emplace_back(
new FetchStageWithMatch(opCtx, workingSet, scan, docMatcher, collection));
- return StatusWith<CoveredInterval*>(new CoveredInterval(_children.back().get(),
- true,
- nextBounds.getInner(),
- nextBounds.getOuter(),
- isLastInterval));
+ return StatusWith<CoveredInterval*>(new CoveredInterval(
+ _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
}
StatusWith<double> GeoNear2DStage::computeDistance(WorkingSetMember* member) {
@@ -1094,11 +1091,8 @@ StatusWith<NearStage::CoveredInterval*> //
// FetchStage owns index scan
_children.emplace_back(new FetchStage(opCtx, workingSet, scan, _nearParams.filter, collection));
- return StatusWith<CoveredInterval*>(new CoveredInterval(_children.back().get(),
- true,
- nextBounds.getInner(),
- nextBounds.getOuter(),
- isLastInterval));
+ return StatusWith<CoveredInterval*>(new CoveredInterval(
+ _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
}
StatusWith<double> GeoNear2DSphereStage::computeDistance(WorkingSetMember* member) {
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index d8f4f12ba82..366d9f7ca56 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -208,23 +208,6 @@ void IDHackStage::doReattachToOperationContext() {
_recordCursor->reattachToOperationContext(getOpCtx());
}
-void IDHackStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
- // Since updates can't mutate the '_id' field, we can ignore mutation invalidations.
- if (INVALIDATION_MUTATION == type) {
- return;
- }
-
- // It's possible that the RecordId getting invalidated is the one we're about to
- // fetch. In this case we do a "forced fetch" and put the WSM in owned object state.
- if (WorkingSet::INVALID_ID != _idBeingPagedIn) {
- WorkingSetMember* member = _workingSet->get(_idBeingPagedIn);
- if (member->hasRecordId() && (member->recordId == dl)) {
- // Fetch it now and kill the RecordId.
- WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
- }
- }
-}
-
// static
bool IDHackStage::supportsQuery(Collection* collection, const CanonicalQuery& query) {
return !query.getQueryRequest().showRecordId() && query.getQueryRequest().getHint().isEmpty() &&
diff --git a/src/mongo/db/exec/idhack.h b/src/mongo/db/exec/idhack.h
index efb3b4aace3..16a7982860f 100644
--- a/src/mongo/db/exec/idhack.h
+++ b/src/mongo/db/exec/idhack.h
@@ -69,7 +69,6 @@ public:
void doRestoreState() final;
void doDetachFromOperationContext() final;
void doReattachToOperationContext() final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
/**
* ID Hack has a very strict criteria for the queries it supports.
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index 1110ed78799..45f45da73f4 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -258,22 +258,6 @@ void IndexScan::doReattachToOperationContext() {
_indexCursor->reattachToOperationContext(getOpCtx());
}
-void IndexScan::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
- // The only state we're responsible for holding is what RecordIds to drop. If a document
- // mutates, the underlying index cursor will deal with it.
- if (INVALIDATION_MUTATION == type) {
- return;
- }
-
- // If we see this RecordId again, it may not be the same document it was before, so we want
- // to return it if we see it again.
- stdx::unordered_set<RecordId, RecordId::Hasher>::iterator it = _returned.find(dl);
- if (it != _returned.end()) {
- ++_specificStats.seenInvalidated;
- _returned.erase(it);
- }
-}
-
std::unique_ptr<PlanStageStats> IndexScan::getStats() {
// WARNING: this could be called even if the collection was dropped. Do not access any
// catalog information here.
diff --git a/src/mongo/db/exec/index_scan.h b/src/mongo/db/exec/index_scan.h
index e9f86959f99..d1e1bffbe24 100644
--- a/src/mongo/db/exec/index_scan.h
+++ b/src/mongo/db/exec/index_scan.h
@@ -135,7 +135,6 @@ public:
void doRestoreState() final;
void doDetachFromOperationContext() final;
void doReattachToOperationContext() final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_IXSCAN;
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index b8343869a60..0e2249fdb74 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -166,29 +166,6 @@ PlanStage::StageState MergeSortStage::doWork(WorkingSetID* out) {
return PlanStage::ADVANCED;
}
-
-void MergeSortStage::doInvalidate(OperationContext* opCtx,
- const RecordId& dl,
- InvalidationType type) {
- // Go through our data and see if we're holding on to the invalidated RecordId.
- for (list<StageWithValue>::iterator valueIt = _mergingData.begin();
- valueIt != _mergingData.end();
- valueIt++) {
- WorkingSetMember* member = _ws->get(valueIt->id);
- if (member->hasRecordId() && (dl == member->recordId)) {
- // Fetch the about-to-be mutated result.
- WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
- ++_specificStats.forcedFetches;
- }
- }
-
- // If we see the deleted RecordId again it is not the same record as it once was so we still
- // want to return it.
- if (_dedup && INVALIDATION_DELETION == type) {
- _seen.erase(dl);
- }
-}
-
// Is lhs less than rhs? Note that priority_queue is a max heap by default so we invert
// the return from the expected value.
bool MergeSortStage::StageWithValueComparison::operator()(const MergingRef& lhs,
diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h
index a1dac80c770..25b4127f786 100644
--- a/src/mongo/db/exec/merge_sort.h
+++ b/src/mongo/db/exec/merge_sort.h
@@ -66,8 +66,6 @@ public:
bool isEOF() final;
StageState doWork(WorkingSetID* out) final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
-
StageType stageType() const final {
return STAGE_SORT_MERGE;
}
@@ -79,49 +77,15 @@ public:
static const char* kStageType;
private:
- // Not owned by us.
- const Collection* _collection;
-
- // Not owned by us.
- WorkingSet* _ws;
-
- // The pattern that we're sorting by.
- BSONObj _pattern;
-
- // Null if this merge sort stage orders strings according to simple binary compare. If non-null,
- // represents the collator used to compare strings.
- const CollatorInterface* _collator;
-
- // Are we deduplicating on RecordId?
- bool _dedup;
-
- // Which RecordIds have we seen?
- stdx::unordered_set<RecordId, RecordId::Hasher> _seen;
-
- // In order to pick the next smallest value, we need each child work(...) until it produces
- // a result. This is the queue of children that haven't given us a result yet.
- std::queue<PlanStage*> _noResultToMerge;
-
- // There is some confusing STL wrangling going on below. Here's a guide:
- //
- // We want to keep a priority_queue of results so we can quickly return the min result.
- //
- // If we receive an invalidate, we need to iterate over any cached state to see if the
- // invalidate is relevant.
- //
- // We can't iterate over a priority_queue, so we keep the actual cached state in a list and
- // have a priority_queue of iterators into that list.
- //
- // Why an iterator instead of a pointer? We need to be able to use the information in the
- // priority_queue to remove the item from the list and quickly.
-
struct StageWithValue {
StageWithValue() : id(WorkingSet::INVALID_ID), stage(NULL) {}
WorkingSetID id;
PlanStage* stage;
};
- // We have a priority queue of these.
+ // This stage maintains a priority queue of results from each child stage so that it can quickly
+ // return the next result according to the sort order. A value in the priority queue is a
+ // MergingRef, an iterator which refers to a buffered (WorkingSetMember, child stage) pair.
typedef std::list<StageWithValue>::iterator MergingRef;
// The comparison function used in our priority queue.
@@ -140,6 +104,29 @@ private:
const CollatorInterface* _collator;
};
+ // Not owned by us.
+ const Collection* _collection;
+
+ // Not owned by us.
+ WorkingSet* _ws;
+
+ // The pattern that we're sorting by.
+ BSONObj _pattern;
+
+ // Null if this merge sort stage orders strings according to simple binary compare. If non-null,
+ // represents the collator used to compare strings.
+ const CollatorInterface* _collator;
+
+ // Are we deduplicating on RecordId?
+ const bool _dedup;
+
+ // Which RecordIds have we seen?
+ stdx::unordered_set<RecordId, RecordId::Hasher> _seen;
+
+ // In order to pick the next smallest value, we need each child to work(...) until it
+ // produces a result. This is the queue of children that haven't given us a result yet.
+ std::queue<PlanStage*> _noResultToMerge;
+
// The min heap of the results we're returning.
std::priority_queue<MergingRef, std::vector<MergingRef>, StageWithValueComparison> _merging;
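The relocated comment above condenses the old "STL wrangling" guide: a std::priority_queue cannot be iterated or have an arbitrary element removed, so the buffered results live in a std::list and the heap orders iterators (MergingRef) into that list. A minimal sketch of the arrangement, with plain ints standing in for the buffered (WorkingSetID, child stage) pairs:

#include <initializer_list>
#include <iostream>
#include <iterator>
#include <list>
#include <queue>
#include <vector>

int main() {
    std::list<int> buffered;
    using Ref = std::list<int>::iterator;

    // Compare through the iterators, inverted to turn the default max-heap
    // into a min-heap (as StageWithValueComparison does).
    auto greaterByValue = [](Ref lhs, Ref rhs) { return *lhs > *rhs; };
    std::priority_queue<Ref, std::vector<Ref>, decltype(greaterByValue)> merging(greaterByValue);

    for (int value : {5, 1, 4}) {
        buffered.push_back(value);
        merging.push(std::prev(buffered.end()));
    }

    while (!merging.empty()) {
        Ref smallest = merging.top();
        merging.pop();
        std::cout << *smallest << '\n';  // prints 1, 4, 5
        // Erasing through the iterator is O(1) and leaves the other list
        // iterators held by the heap valid.
        buffered.erase(smallest);
    }
    return 0;
}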
diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp
index 6652ab1c798..64056ff9c67 100644
--- a/src/mongo/db/exec/multi_iterator.cpp
+++ b/src/mongo/db/exec/multi_iterator.cpp
@@ -131,21 +131,6 @@ void MultiIteratorStage::doReattachToOperationContext() {
}
}
-void MultiIteratorStage::doInvalidate(OperationContext* opCtx,
- const RecordId& dl,
- InvalidationType type) {
- switch (type) {
- case INVALIDATION_DELETION:
- for (size_t i = 0; i < _iterators.size(); i++) {
- _iterators[i]->invalidate(opCtx, dl);
- }
- break;
- case INVALIDATION_MUTATION:
- // no-op
- break;
- }
-}
-
unique_ptr<PlanStageStats> MultiIteratorStage::getStats() {
unique_ptr<PlanStageStats> ret =
make_unique<PlanStageStats>(_commonStats, STAGE_MULTI_ITERATOR);
diff --git a/src/mongo/db/exec/multi_iterator.h b/src/mongo/db/exec/multi_iterator.h
index 6e7e6946472..78909adc72d 100644
--- a/src/mongo/db/exec/multi_iterator.h
+++ b/src/mongo/db/exec/multi_iterator.h
@@ -60,7 +60,6 @@ public:
void doRestoreState() final;
void doDetachFromOperationContext() final;
void doReattachToOperationContext() final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
// Returns empty PlanStageStats object
std::unique_ptr<PlanStageStats> getStats() final;
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 04e79b2ecbf..4b2d60269f7 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -111,7 +111,7 @@ PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) {
// Look for an already produced result that provides the data the caller wants.
if (!bestPlan.results.empty()) {
*out = bestPlan.results.front();
- bestPlan.results.pop_front();
+ bestPlan.results.pop();
return PlanStage::ADVANCED;
}
@@ -236,7 +236,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
std::vector<size_t> candidateOrder = ranking->candidateOrder;
CandidatePlan& bestCandidate = _candidates[_bestPlanIdx];
- std::list<WorkingSetID>& alreadyProduced = bestCandidate.results;
+ const auto& alreadyProduced = bestCandidate.results;
const auto& bestSolution = bestCandidate.solution;
LOG(5) << "Winning solution:\n" << redact(bestSolution->toString());
@@ -360,7 +360,7 @@ bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolic
// Ensure that the BSONObj underlying the WorkingSetMember is owned in case we choose to
// return the results from the 'candidate' plan.
member->makeObjOwnedIfNeeded();
- candidate.results.push_back(id);
+ candidate.results.push(id);
// Once a plan returns enough results, stop working.
if (candidate.results.size() >= numResults) {
@@ -410,45 +410,6 @@ bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolic
return !doneWorking;
}
-namespace {
-
-void invalidateHelper(OperationContext* opCtx,
- WorkingSet* ws, // may flag for review
- const RecordId& recordId,
- list<WorkingSetID>* idsToInvalidate,
- const Collection* collection) {
- for (auto it = idsToInvalidate->begin(); it != idsToInvalidate->end(); ++it) {
- WorkingSetMember* member = ws->get(*it);
- if (member->hasRecordId() && member->recordId == recordId) {
- WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, collection);
- }
- }
-}
-
-} // namespace
-
-void MultiPlanStage::doInvalidate(OperationContext* opCtx,
- const RecordId& recordId,
- InvalidationType type) {
- if (_failure) {
- return;
- }
-
- if (bestPlanChosen()) {
- CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
- invalidateHelper(opCtx, bestPlan.ws, recordId, &bestPlan.results, _collection);
- if (hasBackupPlan()) {
- CandidatePlan& backupPlan = _candidates[_backupPlanIdx];
- invalidateHelper(opCtx, backupPlan.ws, recordId, &backupPlan.results, _collection);
- }
- } else {
- for (size_t ix = 0; ix < _candidates.size(); ++ix) {
- invalidateHelper(
- opCtx, _candidates[ix].ws, recordId, &_candidates[ix].results, _collection);
- }
- }
-}
-
bool MultiPlanStage::hasBackupPlan() const {
return kNoSuchPlan != _backupPlanIdx;
}
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 5d6369e3c0d..5b118653d9a 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -84,8 +84,6 @@ public:
StageState doWork(WorkingSetID* out) final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
-
StageType stageType() const final {
return STAGE_MULTI_PLAN;
}
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index 7424adca28d..8621c8fa2b7 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -57,12 +57,10 @@ NearStage::NearStage(OperationContext* opCtx,
NearStage::~NearStage() {}
NearStage::CoveredInterval::CoveredInterval(PlanStage* covering,
- bool dedupCovering,
double minDistance,
double maxDistance,
bool inclusiveMax)
: covering(covering),
- dedupCovering(dedupCovering),
minDistance(minDistance),
maxDistance(maxDistance),
inclusiveMax(inclusiveMax) {}
@@ -187,7 +185,7 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
WorkingSetMember* nextMember = _workingSet->get(nextMemberID);
// The child stage may not dedup so we must dedup them ourselves.
- if (_nextInterval->dedupCovering && nextMember->hasRecordId()) {
+ if (nextMember->hasRecordId()) {
if (_seenDocuments.end() != _seenDocuments.find(nextMember->recordId)) {
_workingSet->free(nextMemberID);
return PlanStage::NEED_TIME;
@@ -212,7 +210,7 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
nextMember->makeObjOwnedIfNeeded();
_resultBuffer.push(SearchResult(nextMemberID, memberDistance));
- // Store the member's RecordId, if available, for quick invalidation
+ // Store the member's RecordId, if available, for deduping.
if (nextMember->hasRecordId()) {
_seenDocuments.insert(std::make_pair(nextMember->recordId, nextMemberID));
}
@@ -266,9 +264,10 @@ PlanStage::StageState NearStage::advanceNext(WorkingSetID* toReturn) {
// The next document in _resultBuffer is in the search interval, so we can return it.
_resultBuffer.pop();
- // If we're returning something, take it out of our RecordId -> WSID map so that future
- // calls to invalidate don't cause us to take action for a RecordId we're done with.
*toReturn = resultID;
+
+ // If we're returning something, take it out of our RecordId -> WSID map. This keeps
+ // '_seenDocuments' in sync with '_resultBuffer'.
WorkingSetMember* member = _workingSet->get(*toReturn);
if (member->hasRecordId()) {
_seenDocuments.erase(member->recordId);
@@ -284,23 +283,6 @@ bool NearStage::isEOF() {
return SearchState_Finished == _searchState;
}
-void NearStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
- // If a result is in _resultBuffer and has a RecordId it will be in _seenDocuments as
- // well. It's safe to return the result w/o the RecordId, so just fetch the result.
- stdx::unordered_map<RecordId, WorkingSetID, RecordId::Hasher>::iterator seenIt =
- _seenDocuments.find(dl);
-
- if (seenIt != _seenDocuments.end()) {
- WorkingSetMember* member = _workingSet->get(seenIt->second);
- verify(member->hasRecordId());
- WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
- verify(!member->hasRecordId());
-
- // Don't keep it around in the seen map since there's no valid RecordId anymore
- _seenDocuments.erase(seenIt);
- }
-}
-
unique_ptr<PlanStageStats> NearStage::getStats() {
unique_ptr<PlanStageStats> ret = make_unique<PlanStageStats>(_commonStats, _stageType);
ret->specific.reset(_specificStats.clone());
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index d5971aa3949..ab13d2e16b4 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -80,15 +80,8 @@ namespace mongo {
* correctness does not depend on interval size.
*
* The child stage may return duplicate documents, so it is the responsibility of NearStage to
- * deduplicate. Every document in _resultBuffer is kept track of in _seenDocuments. When a
- * document is returned or invalidated, it is removed from _seenDocuments.
- *
- * TODO: If a document is indexed in multiple cells (Polygons, PolyLines, etc.), there is a
- * possibility that it will be returned more than once. Since doInvalidate() force fetches a
- * document and removes it from _seenDocuments, NearStage will not deduplicate if it encounters
- * another instance of this document. This will only occur if two cells for a document are in the
- * same interval and the invalidation occurs between the scan of the first cell and the second, so
- * NearStage no longer knows that it's seen this document before.
+ * deduplicate. Every document in _resultBuffer is tracked in _seenDocuments. When a document
+ * is returned, it is removed from _seenDocuments.
*
* TODO: Right now the interface allows the nextCovering() to be adaptive, but doesn't allow
* aborting and shrinking a covered range being buffered if we guess wrong.
@@ -102,8 +95,6 @@ public:
bool isEOF() final;
StageState doWork(WorkingSetID* out) final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
-
StageType stageType() const final;
std::unique_ptr<PlanStageStats> getStats() final;
const SpecificStats* getSpecificStats() const final;
@@ -169,9 +160,7 @@ private:
// Generic state for progressive near search
//
- // Not owned here
WorkingSet* const _workingSet;
- // Not owned here, used for fetching buffered results before invalidation
Collection* const _collection;
// A progressive search works in stages of buffering and then advancing
@@ -182,8 +171,7 @@ private:
SearchState_Finished
} _searchState;
- // May need to track disklocs from the child stage to do our own deduping, also to do
- // invalidation of buffered results.
+ // Tracks RecordIds from the child stage to do our own deduping.
stdx::unordered_map<RecordId, WorkingSetID, RecordId::Hasher> _seenDocuments;
// Stats for the stage covering this interval
@@ -212,14 +200,9 @@ private:
* A covered interval over which a portion of a near search can be run.
*/
struct NearStage::CoveredInterval {
- CoveredInterval(PlanStage* covering,
- bool dedupCovering,
- double minDistance,
- double maxDistance,
- bool inclusiveMax);
+ CoveredInterval(PlanStage* covering, double minDistance, double maxDistance, bool inclusiveMax);
PlanStage* const covering; // Owned in PlanStage::_children.
- const bool dedupCovering;
const double minDistance;
const double maxDistance;
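With invalidation gone, _seenDocuments exists solely for deduplication, and the erase-on-return in advanceNext() keeps it consistent with _resultBuffer. A minimal sketch of that contract, with RecordId reduced to an int64_t and a plain FIFO standing in for the distance-ordered buffer:

#include <cstdint>
#include <queue>
#include <unordered_set>

class DedupBuffer {
public:
    // Buffers a child result unless its id was already buffered; mirrors
    // the dedup check in NearStage::bufferNext().
    bool bufferIfUnseen(std::int64_t recordId) {
        if (!_seen.insert(recordId).second) {
            return false;  // duplicate produced by the covering child
        }
        _buffer.push(recordId);
        return true;
    }

    // Returns the next buffered result and drops it from the seen-set,
    // keeping the two structures in sync (NearStage::advanceNext()).
    bool advanceNext(std::int64_t* out) {
        if (_buffer.empty()) {
            return false;
        }
        *out = _buffer.front();
        _buffer.pop();
        _seen.erase(*out);
        return true;
    }

private:
    std::queue<std::int64_t> _buffer;
    std::unordered_set<std::int64_t> _seen;
};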
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index 89b811a2487..2d79d242ea2 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -120,23 +120,6 @@ PlanStage::StageState OrStage::doWork(WorkingSetID* out) {
return childStatus;
}
-void OrStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
- // TODO remove this since calling isEOF is illegal inside of doInvalidate().
- if (isEOF()) {
- return;
- }
-
- // If we see DL again it is not the same record as it once was so we still want to
- // return it.
- if (_dedup && INVALIDATION_DELETION == type) {
- stdx::unordered_set<RecordId, RecordId::Hasher>::iterator it = _seen.find(dl);
- if (_seen.end() != it) {
- ++_specificStats.recordIdsForgotten;
- _seen.erase(dl);
- }
- }
-}
-
unique_ptr<PlanStageStats> OrStage::getStats() {
_commonStats.isEOF = isEOF();
diff --git a/src/mongo/db/exec/or.h b/src/mongo/db/exec/or.h
index 0fd68249a5d..aa1a4b15015 100644
--- a/src/mongo/db/exec/or.h
+++ b/src/mongo/db/exec/or.h
@@ -37,11 +37,9 @@
namespace mongo {
/**
- * This stage outputs the union of its children. It optionally deduplicates on RecordId.
+ * This stage outputs the union of its children. It optionally deduplicates on RecordId.
*
* Preconditions: Valid RecordId.
- *
- * If we're deduping, we may fail to dedup any invalidated RecordId properly.
*/
class OrStage final : public PlanStage {
public:
@@ -55,8 +53,6 @@ public:
StageState doWork(WorkingSetID* out) final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
-
StageType stageType() const final {
return STAGE_OR;
}
@@ -78,7 +74,7 @@ private:
size_t _currentChild;
// True if we dedup on RecordId, false otherwise.
- bool _dedup;
+ const bool _dedup;
// Which RecordIds have we returned?
stdx::unordered_set<RecordId, RecordId::Hasher> _seen;
diff --git a/src/mongo/db/exec/pipeline_proxy.h b/src/mongo/db/exec/pipeline_proxy.h
index 04ac712de71..3793fc7c3ba 100644
--- a/src/mongo/db/exec/pipeline_proxy.h
+++ b/src/mongo/db/exec/pipeline_proxy.h
@@ -68,12 +68,6 @@ public:
MONGO_UNREACHABLE;
}
- void doInvalidate(OperationContext* opCtx, const RecordId& rid, InvalidationType type) final {
- // A PlanExecutor with a PipelineProxyStage should be registered with the global cursor
- // manager, so should not receive invalidations.
- MONGO_UNREACHABLE;
- }
-
/**
* Pass through the last oplog timestamp from the proxied pipeline.
*/
diff --git a/src/mongo/db/exec/plan_stage.cpp b/src/mongo/db/exec/plan_stage.cpp
index c532242264e..5cc653c5b8c 100644
--- a/src/mongo/db/exec/plan_stage.cpp
+++ b/src/mongo/db/exec/plan_stage.cpp
@@ -74,15 +74,6 @@ void PlanStage::restoreState() {
doRestoreState();
}
-void PlanStage::invalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- for (auto&& child : _children) {
- child->invalidate(opCtx, dl, type);
- }
-
- doInvalidate(opCtx, dl, type);
-}
-
void PlanStage::detachFromOperationContext() {
invariant(_opCtx);
_opCtx = nullptr;
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index 33204c84839..896b8878e41 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -33,7 +33,6 @@
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/exec/working_set.h"
-#include "mongo/db/invalidation_type.h"
namespace mongo {
@@ -246,21 +245,6 @@ public:
*/
void reattachToOperationContext(OperationContext* opCtx);
- /**
- * Notifies a stage that a RecordId is going to be deleted (or in-place updated) so that the
- * stage can invalidate or modify any state required to continue processing without this
- * RecordId.
- *
- * Can only be called after a saveState but before a restoreState.
- *
- * The provided OperationContext should be used if any work needs to be performed during the
- * invalidate (as the state of the stage must be saved before any calls to invalidate, the
- * stage's own OperationContext is inactive during the invalidate and should not be used).
- *
- * Propagates to all children, then calls doInvalidate().
- */
- void invalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type);
-
/*
* Releases any resources held by this stage. It is an error to use a PlanStage in any way after
* calling dispose(). Does not throw exceptions.
@@ -381,11 +365,6 @@ protected:
*/
virtual void doDispose() {}
- /**
- * Does the stage-specific invalidation work.
- */
- virtual void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {}
-
ClockSource* getClock() const;
OperationContext* getOpCtx() const {
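invalidate()/doInvalidate() followed the same template-method shape that PlanStage's surviving pairs (saveState()/doSaveState(), restoreState()/doRestoreState(), and so on) still use: a public non-virtual entry point performs the common bookkeeping and child propagation, then calls a protected virtual hook for stage-specific work. A minimal sketch of the shape:

#include <memory>
#include <vector>

class Stage {
public:
    virtual ~Stage() = default;

    // Public, non-virtual: propagates to children, then defers
    // stage-specific behavior to the do*() hook.
    void saveState() {
        for (auto& child : _children) {
            child->saveState();
        }
        doSaveState();
    }

protected:
    // Stage-specific hook; a no-op by default, exactly like the removed
    // doInvalidate() default above.
    virtual void doSaveState() {}

    std::vector<std::unique_ptr<Stage>> _children;
};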
diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h
index 434a4dda1a3..b7f0c7d9469 100644
--- a/src/mongo/db/exec/plan_stats.h
+++ b/src/mongo/db/exec/plan_stats.h
@@ -60,7 +60,6 @@ struct CommonStats {
works(0),
yields(0),
unyields(0),
- invalidates(0),
advanced(0),
needTime(0),
needYield(0),
@@ -73,7 +72,6 @@ struct CommonStats {
size_t works;
size_t yields;
size_t unyields;
- size_t invalidates;
// How many times was this state the return value of work(...)?
size_t advanced;
@@ -138,20 +136,13 @@ private:
};
struct AndHashStats : public SpecificStats {
- AndHashStats() : flaggedButPassed(0), flaggedInProgress(0), memUsage(0), memLimit(0) {}
+ AndHashStats() = default;
SpecificStats* clone() const final {
AndHashStats* specific = new AndHashStats(*this);
return specific;
}
- // Invalidation counters.
- // How many results had the AND fully evaluated but were invalidated?
- size_t flaggedButPassed;
-
- // How many results were mid-AND but got flagged?
- size_t flaggedInProgress;
-
// How many entries are in the map after each child?
// child 'i' produced children[i].common.advanced RecordIds, of which mapAfterChild[i] were
// intersections.
@@ -161,14 +152,14 @@ struct AndHashStats : public SpecificStats {
// commonstats.advanced is how many passed.
// What's our current memory usage?
- size_t memUsage;
+ size_t memUsage = 0u;
// What's our memory limit?
- size_t memLimit;
+ size_t memLimit = 0u;
};
struct AndSortedStats : public SpecificStats {
- AndSortedStats() : flagged(0) {}
+ AndSortedStats() = default;
SpecificStats* clone() const final {
AndSortedStats* specific = new AndSortedStats(*this);
@@ -177,9 +168,6 @@ struct AndSortedStats : public SpecificStats {
// How many results from each child did not pass the AND?
std::vector<size_t> failedAnd;
-
- // How many results were flagged via invalidation?
- size_t flagged;
};
struct CachedPlanStats : public SpecificStats {
@@ -281,17 +269,13 @@ struct CountScanStats : public SpecificStats {
};
struct DeleteStats : public SpecificStats {
- DeleteStats() : docsDeleted(0), nInvalidateSkips(0) {}
+ DeleteStats() = default;
SpecificStats* clone() const final {
return new DeleteStats(*this);
}
- size_t docsDeleted;
-
- // Invalidated documents can be force-fetched, causing the now invalid RecordId to
- // be thrown out. The delete stage skips over any results which do not have a RecordId.
- size_t nInvalidateSkips;
+ size_t docsDeleted = 0u;
};
struct DistinctScanStats : public SpecificStats {
@@ -345,7 +329,7 @@ struct EnsureSortedStats : public SpecificStats {
};
struct FetchStats : public SpecificStats {
- FetchStats() : alreadyHasObj(0), forcedFetches(0), docsExamined(0) {}
+ FetchStats() = default;
SpecificStats* clone() const final {
FetchStats* specific = new FetchStats(*this);
@@ -353,13 +337,10 @@ struct FetchStats : public SpecificStats {
}
// Have we seen anything that already had an object?
- size_t alreadyHasObj;
-
- // How many records were we forced to fetch as the result of an invalidation?
- size_t forcedFetches;
+ size_t alreadyHasObj = 0u;
// The total number of full documents touched by the fetch stage.
- size_t docsExamined;
+ size_t docsExamined = 0u;
};
struct GroupStats : public SpecificStats {
@@ -401,7 +382,6 @@ struct IndexScanStats : public SpecificStats {
isUnique(false),
dupsTested(0),
dupsDropped(0),
- seenInvalidated(0),
keysExamined(0),
seeks(0) {}
@@ -448,9 +428,6 @@ struct IndexScanStats : public SpecificStats {
size_t dupsTested;
size_t dupsDropped;
- size_t seenInvalidated;
- // TODO: we could track key sizes here.
-
// Number of entries retrieved from the index during the scan.
size_t keysExamined;
@@ -486,18 +463,15 @@ struct MultiPlanStats : public SpecificStats {
};
struct OrStats : public SpecificStats {
- OrStats() : dupsTested(0), dupsDropped(0), recordIdsForgotten(0) {}
+ OrStats() = default;
SpecificStats* clone() const final {
OrStats* specific = new OrStats(*this);
return specific;
}
- size_t dupsTested;
- size_t dupsDropped;
-
- // How many calls to invalidate(...) actually removed a RecordId from our deduping map?
- size_t recordIdsForgotten;
+ size_t dupsTested = 0u;
+ size_t dupsDropped = 0u;
};
struct ProjectionStats : public SpecificStats {
@@ -513,42 +487,36 @@ struct ProjectionStats : public SpecificStats {
};
struct SortStats : public SpecificStats {
- SortStats() : forcedFetches(0), memUsage(0), memLimit(0) {}
+ SortStats() = default;
SpecificStats* clone() const final {
SortStats* specific = new SortStats(*this);
return specific;
}
- // How many records were we forced to fetch as the result of an invalidation?
- size_t forcedFetches;
-
// What's our current memory usage?
- size_t memUsage;
+ size_t memUsage = 0u;
// What's our memory limit?
- size_t memLimit;
+ size_t memLimit = 0u;
// The number of results to return from the sort.
- size_t limit;
+ size_t limit = 0u;
// The pattern according to which we are sorting.
BSONObj sortPattern;
};
struct MergeSortStats : public SpecificStats {
- MergeSortStats() : dupsTested(0), dupsDropped(0), forcedFetches(0) {}
+ MergeSortStats() = default;
SpecificStats* clone() const final {
MergeSortStats* specific = new MergeSortStats(*this);
return specific;
}
- size_t dupsTested;
- size_t dupsDropped;
-
- // How many records were we forced to fetch as the result of an invalidation?
- size_t forcedFetches;
+ size_t dupsTested = 0u;
+ size_t dupsDropped = 0u;
// The pattern according to which we are sorting.
BSONObj sortPattern;
@@ -610,8 +578,7 @@ struct UpdateStats : public SpecificStats {
nModified(0),
isDocReplacement(false),
fastmodinsert(false),
- inserted(false),
- nInvalidateSkips(0) {}
+ inserted(false) {}
SpecificStats* clone() const final {
return new UpdateStats(*this);
@@ -636,11 +603,6 @@ struct UpdateStats : public SpecificStats {
// The object that was inserted. This is an empty document if no insert was performed.
BSONObj objInserted;
-
- // Invalidated documents can be force-fetched, causing the now invalid RecordId to
- // be thrown out. The update stage skips over any results which do not have the
- // RecordId to update.
- size_t nInvalidateSkips;
};
struct TextStats : public SpecificStats {
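Most of the plan_stats.h churn follows one mechanical pattern: counter-zeroing constructors give way to "= default" plus C++11 in-class member initializers, so each counter's initial value sits next to its declaration and a newly added member cannot be forgotten in an init list. Side by side, using OrStats from the hunk above:

#include <cstddef>

// Before: every counter must be repeated in the constructor's init list.
struct OrStatsBefore {
    OrStatsBefore() : dupsTested(0), dupsDropped(0) {}
    std::size_t dupsTested;
    std::size_t dupsDropped;
};

// After: default member initializers zero the counters at the declaration.
struct OrStatsAfter {
    OrStatsAfter() = default;
    std::size_t dupsTested = 0u;
    std::size_t dupsDropped = 0u;
};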
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 7bcadd0675b..aabd85106a5 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -150,11 +150,6 @@ Status ProjectionStage::transform(WorkingSetMember* member) {
BSONObjBuilder bob;
- // Note that even if our fast path analysis is bug-free something that is
- // covered might be invalidated and just be an obj. In this case we just go
- // through the SIMPLE_DOC path which is still correct if the covered data
- // is not available.
- //
// SIMPLE_DOC implies that we expect an object so it's kind of redundant.
if ((ProjectionStageParams::SIMPLE_DOC == _projImpl) || member->hasObj()) {
// If we got here because of SIMPLE_DOC the planner shouldn't have messed up.
diff --git a/src/mongo/db/exec/queued_data_stage_test.cpp b/src/mongo/db/exec/queued_data_stage_test.cpp
index de09994b2a0..cac5a0c1625 100644
--- a/src/mongo/db/exec/queued_data_stage_test.cpp
+++ b/src/mongo/db/exec/queued_data_stage_test.cpp
@@ -90,7 +90,6 @@ TEST_F(QueuedDataStageTest, validateStats) {
const CommonStats* stats = mock->getCommonStats();
ASSERT_EQUALS(stats->yields, 0U);
ASSERT_EQUALS(stats->unyields, 0U);
- ASSERT_EQUALS(stats->invalidates, 0U);
ASSERT_EQUALS(stats->works, 0U);
ASSERT_EQUALS(stats->needTime, 0U);
ASSERT_EQUALS(stats->advanced, 0U);
@@ -118,10 +117,6 @@ TEST_F(QueuedDataStageTest, validateStats) {
mock->restoreState();
ASSERT_EQUALS(stats->unyields, 1U);
- // invalidates
- const RecordId dl(0, 0);
- mock->invalidate(NULL, dl, INVALIDATION_MUTATION);
- ASSERT_EQUALS(stats->invalidates, 1U);
// and now we are done, but must trigger EOF with getStats()
ASSERT_FALSE(stats->isEOF);
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 243e8bf7a05..97f3b469d65 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -121,19 +121,11 @@ PlanStage::StageState SortStage::doWork(WorkingSetID* out) {
StageState code = child()->work(&id);
if (PlanStage::ADVANCED == code) {
- // Add it into the map for quick invalidation if it has a valid RecordId.
- // A RecordId may be invalidated at any time (during a yield). We need to get into
- // the WorkingSet as quickly as possible to handle it.
WorkingSetMember* member = _ws->get(id);
// Planner must put a fetch before we get here.
verify(member->hasObj());
- // We might be sorting something that was invalidated at some point.
- if (member->hasRecordId()) {
- _wsidByRecordId[member->recordId] = id;
- }
-
SortableDataItem item;
item.wsid = id;
@@ -177,40 +169,9 @@ PlanStage::StageState SortStage::doWork(WorkingSetID* out) {
*out = _resultIterator->wsid;
_resultIterator++;
- // If we're returning something, take it out of our DL -> WSID map so that future
- // calls to invalidate don't cause us to take action for a DL we're done with.
- WorkingSetMember* member = _ws->get(*out);
- if (member->hasRecordId()) {
- _wsidByRecordId.erase(member->recordId);
- }
-
return PlanStage::ADVANCED;
}
-void SortStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
- // If we have a deletion, we can fetch and carry on.
- // If we have a mutation, it's easier to fetch and use the previous document.
- // So, no matter what, fetch and keep the doc in play.
-
- // _data contains indices into the WorkingSet, not actual data. If a WorkingSetMember in
- // the WorkingSet needs to change state as a result of a RecordId invalidation, it will still
- // be at the same spot in the WorkingSet. As such, we don't need to modify _data.
- DataMap::iterator it = _wsidByRecordId.find(dl);
-
- // If we're holding on to data that's got the RecordId we're invalidating...
- if (_wsidByRecordId.end() != it) {
- // Grab the WSM that we're nuking.
- WorkingSetMember* member = _ws->get(it->second);
- verify(member->recordId == dl);
-
- WorkingSetCommon::fetchAndInvalidateRecordId(opCtx, member, _collection);
-
- // Remove the RecordId from our set of active DLs.
- _wsidByRecordId.erase(it);
- ++_specificStats.forcedFetches;
- }
-}
-
unique_ptr<PlanStageStats> SortStage::getStats() {
_commonStats.isEOF = isEOF();
const size_t maxBytes = static_cast<size_t>(internalQueryExecMaxBlockingSortBytes.load());
@@ -306,13 +267,10 @@ void SortStage::addToBuffer(const SortableDataItem& item) {
}
}
- // If the working set ID is valid, remove from
- // RecordId invalidation map and free from working set.
+ // There was a buffered result which we can throw out because we are executing a sort with a
+ // limit, and the result is now known not to be in the top k set. Free the working set member
+ // associated with 'wsidToFree'.
if (wsidToFree != WorkingSet::INVALID_ID) {
- WorkingSetMember* member = _ws->get(wsidToFree);
- if (member->hasRecordId()) {
- _wsidByRecordId.erase(member->recordId);
- }
_ws->free(wsidToFree);
}
}
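The rewritten comment above describes top-k pruning during a limited sort: once the buffer holds more than 'limit' items, the displaced item can be freed immediately. A self-contained sketch of that idea with generic types (this is not the stage's actual buffer):

    #include <queue>
    #include <vector>

    // Keep only the k smallest values; anything displaced is known not to
    // be in the top k and can be discarded right away.
    void addToTopK(std::priority_queue<int>& topK, size_t k, int value,
                   std::vector<int>& discarded) {
        topK.push(value);
        if (topK.size() > k) {
            discarded.push_back(topK.top());  // The largest element loses.
            topK.pop();
        }
    }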
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index 3418cea9447..e3793b20c62 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -77,8 +77,6 @@ public:
bool isEOF() final;
StageState doWork(WorkingSetID* out) final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
-
StageType stageType() const final {
return STAGE_SORT;
}
@@ -170,10 +168,6 @@ private:
// Iterates through _data post-sort returning it.
std::vector<SortableDataItem>::iterator _resultIterator;
- // We buffer a lot of data and we want to look it up by RecordId quickly upon invalidation.
- typedef stdx::unordered_map<RecordId, WorkingSetID, RecordId::Hasher> DataMap;
- DataMap _wsidByRecordId;
-
SortStats _specificStats;
// The usage in bytes of all buffered data that we're sorting.
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 00c4c5a053c..d6a78a3853e 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -252,9 +252,9 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
// We pass the SometimesCache option to the MPS because the SubplanStage currently does
// not use the CachedPlanStage's eviction mechanism. We therefore are more conservative
- // about putting a potentially bad plan into the cache in the subplan path.
- // We temporarily add the MPS to _children to ensure that we pass down all
- // save/restore/invalidate messages that can be generated if pickBestPlan yields.
+ // about putting a potentially bad plan into the cache in the subplan path. We
+ // temporarily add the MPS to _children to ensure that we pass down all save/restore
+ // messages that can be generated if pickBestPlan yields.
invariant(_children.empty());
_children.emplace_back(
stdx::make_unique<MultiPlanStage>(getOpCtx(),
diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp
index 19873825342..b4070bb3a3a 100644
--- a/src/mongo/db/exec/text_or.cpp
+++ b/src/mongo/db/exec/text_or.cpp
@@ -104,17 +104,6 @@ void TextOrStage::doReattachToOperationContext() {
_recordCursor->reattachToOperationContext(getOpCtx());
}
-void TextOrStage::doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
- // Remove the RecordID from the ScoreMap.
- ScoreMap::iterator scoreIt = _scores.find(dl);
- if (scoreIt != _scores.end()) {
- if (scoreIt == _scoreIterator) {
- _scoreIterator++;
- }
- _scores.erase(scoreIt);
- }
-}
-
std::unique_ptr<PlanStageStats> TextOrStage::getStats() {
_commonStats.isEOF = isEOF();
diff --git a/src/mongo/db/exec/text_or.h b/src/mongo/db/exec/text_or.h
index 4df4563aefd..41bc808b880 100644
--- a/src/mongo/db/exec/text_or.h
+++ b/src/mongo/db/exec/text_or.h
@@ -87,7 +87,6 @@ public:
void doRestoreState() final;
void doDetachFromOperationContext() final;
void doReattachToOperationContext() final;
- void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) final;
StageType stageType() const final {
return STAGE_TEXT_OR;
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index b97d3209e80..cac39a29729 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -569,11 +569,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) {
// it.
ScopeGuard memberFreer = MakeGuard(&WorkingSet::free, _ws, id);
- if (!member->hasRecordId()) {
- // We expect to be here because of an invalidation causing a force-fetch.
- ++_specificStats.nInvalidateSkips;
- return PlanStage::NEED_TIME;
- }
+ invariant(member->hasRecordId());
recordId = member->recordId;
// Updates can't have projections. This means that covering analysis will always add
diff --git a/src/mongo/db/exec/working_set.cpp b/src/mongo/db/exec/working_set.cpp
index a66a184e440..0882328c285 100644
--- a/src/mongo/db/exec/working_set.cpp
+++ b/src/mongo/db/exec/working_set.cpp
@@ -157,7 +157,7 @@ bool WorkingSetMember::hasOwnedObj() const {
}
void WorkingSetMember::makeObjOwnedIfNeeded() {
- if (supportsDocLocking() && _state == RID_AND_OBJ && !obj.value().isOwned()) {
+ if (_state == RID_AND_OBJ && !obj.value().isOwned()) {
obj.setValue(obj.value().getOwned());
}
}
diff --git a/src/mongo/db/exec/working_set.h b/src/mongo/db/exec/working_set.h
index a2a719208bb..9cc70750638 100644
--- a/src/mongo/db/exec/working_set.h
+++ b/src/mongo/db/exec/working_set.h
@@ -232,8 +232,9 @@ public:
// BSONObj might be owned or unowned.
RID_AND_OBJ,
- // RecordId has been invalidated, or the obj doesn't correspond to an on-disk document
- // anymore (e.g. is a computed expression).
+ // The WSM doesn't correspond to an on-disk document anymore (e.g. is a computed
+ // expression). Since it doesn't correspond to a stored document, a WSM in this state has an
+ // owned BSONObj, but no record id.
OWNED_OBJ,
};
@@ -265,7 +266,8 @@ public:
* Ensures that 'obj' of a WSM in the RID_AND_OBJ state is owned BSON. It is a no-op if the WSM
* is in a different state or if 'obj' is already owned.
*
- * It is also a no-op if the active storage engine doesn't support document-level concurrency.
+ * It is illegal for unowned BSON to survive a yield, so this must be called on any working set
+ * members which may stay alive across yield points.
*/
void makeObjOwnedIfNeeded();
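Per the new contract on makeObjOwnedIfNeeded(), any stage that buffers working set members across a yield should force BSON ownership while saving state. A sketch of such a call site (the stage and its _bufferedIds container are hypothetical):

    void ExampleBufferingStage::doSaveState() {
        for (WorkingSetID id : _bufferedIds) {  // Hypothetical buffer of ids.
            WorkingSetMember* member = _ws->get(id);
            // Unowned BSON may point into storage-engine memory that is not
            // stable across a yield; this is a no-op for owned objects.
            member->makeObjOwnedIfNeeded();
        }
    }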
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index cdd4b2227eb..4439af31ba8 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -39,38 +39,7 @@
namespace mongo {
-// static
-bool WorkingSetCommon::fetchAndInvalidateRecordId(OperationContext* opCtx,
- WorkingSetMember* member,
- const Collection* collection) {
- // Already in our desired state.
- if (member->getState() == WorkingSetMember::OWNED_OBJ) {
- return true;
- }
-
- // We can't do anything without a RecordId.
- if (!member->hasRecordId()) {
- return false;
- }
-
- // Do the fetch, invalidate the DL.
- member->obj = collection->docFor(opCtx, member->recordId);
- member->obj.setValue(member->obj.value().getOwned());
- member->recordId = RecordId();
- member->transitionToOwnedObj();
-
- return true;
-}
-
void WorkingSetCommon::prepareForSnapshotChange(WorkingSet* workingSet) {
- if (!supportsDocLocking()) {
- // Non doc-locking storage engines use invalidations, so we don't need to examine the
- // buffered working set ids. But we do need to clear the set of ids in order to keep our
- // memory utilization in check.
- workingSet->getAndClearYieldSensitiveIds();
- return;
- }
-
for (auto id : workingSet->getAndClearYieldSensitiveIds()) {
if (workingSet->isFree(id)) {
continue;
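For orientation, the surviving loop flags index-provided members so that a later fetch re-validates them; this "suspicious" marking is what the merge-sort test below relies on. A sketch of that shape (illustrative, not the verbatim remainder of the hunk):

    for (auto id : workingSet->getAndClearYieldSensitiveIds()) {
        if (workingSet->isFree(id)) {
            continue;
        }
        WorkingSetMember* member = workingSet->get(id);
        if (member->getState() == WorkingSetMember::RID_AND_IDX) {
            // The underlying index key may be deleted or mutated while
            // yielded; mark the member so a subsequent fetch re-checks it.
            member->isSuspicious = true;
        }
    }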
diff --git a/src/mongo/db/exec/working_set_common.h b/src/mongo/db/exec/working_set_common.h
index 61c0e04af15..5f35dffb878 100644
--- a/src/mongo/db/exec/working_set_common.h
+++ b/src/mongo/db/exec/working_set_common.h
@@ -41,15 +41,6 @@ class SeekableRecordCursor;
class WorkingSetCommon {
public:
/**
- * Get an owned copy of the BSONObj the WSM refers to.
- * Requires either a valid BSONObj or valid RecordId.
- * Returns true if the fetch and invalidate succeeded, false otherwise.
- */
- static bool fetchAndInvalidateRecordId(OperationContext* opCtx,
- WorkingSetMember* member,
- const Collection* collection);
-
- /**
* This must be called as part of "saveState" operations after all nodes in the tree save their
* state.
*
diff --git a/src/mongo/db/exec/write_stage_common.cpp b/src/mongo/db/exec/write_stage_common.cpp
index 8d48801f2db..570e1c3ec4f 100644
--- a/src/mongo/db/exec/write_stage_common.cpp
+++ b/src/mongo/db/exec/write_stage_common.cpp
@@ -46,8 +46,13 @@ bool ensureStillMatches(const Collection* collection,
const CanonicalQuery* cq) {
// If the snapshot changed, then we have to make sure we have the latest copy of the doc and
// that it still matches.
+ //
+ // Storage engines that don't support document-level concurrency also do not track snapshot ids.
+ // Those storage engines always need to check whether the document still matches, as the
+ // document we are planning to delete may have already been deleted or updated during yield.
WorkingSetMember* member = ws->get(id);
- if (opCtx->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
+ if (!supportsDocLocking() ||
+ opCtx->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
std::unique_ptr<SeekableRecordCursor> cursor(collection->getCursor(opCtx));
if (!WorkingSetCommon::fetch(opCtx, ws, id, cursor)) {
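On the caller side, this helper lets a write stage drop a stale candidate rather than operate on it. A sketch of the expected usage inside a delete or update stage (the stage-local names _collection, _ws, and _params are illustrative):

    // Re-verify the buffered document before writing; the snapshot may have
    // changed, or the engine may not track snapshot ids at all.
    if (!write_stage_common::ensureStillMatches(
            _collection, getOpCtx(), _ws, id, _params.canonicalQuery)) {
        _ws->free(id);  // Deleted or no longer matching: skip it.
        return PlanStage::NEED_TIME;
    }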
diff --git a/src/mongo/db/invalidation_type.h b/src/mongo/db/invalidation_type.h
deleted file mode 100644
index 70c40ba9103..00000000000
--- a/src/mongo/db/invalidation_type.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Copyright (C) 2014 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-namespace mongo {
-
-enum InvalidationType {
- // The RecordId is about to be deleted. The receiver of this invalidate call cannot use
- // the RecordId after it returns from the invalidate.
- INVALIDATION_DELETION,
-
- // The RecordId's contents are about to change.
- INVALIDATION_MUTATION,
-};
-
-} // namespace mongo
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index bff00347c6a..99afeadd7da 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -330,7 +330,6 @@ void Explain::statsToBSON(const PlanStageStats& stats,
bob->appendNumber("saveState", stats.common.yields);
bob->appendNumber("restoreState", stats.common.unyields);
bob->appendNumber("isEOF", stats.common.isEOF);
- bob->appendNumber("invalidates", stats.common.invalidates);
}
// Stage-specific stats
@@ -341,8 +340,6 @@ void Explain::statsToBSON(const PlanStageStats& stats,
bob->appendNumber("memUsage", spec->memUsage);
bob->appendNumber("memLimit", spec->memLimit);
- bob->appendNumber("flaggedButPassed", spec->flaggedButPassed);
- bob->appendNumber("flaggedInProgress", spec->flaggedInProgress);
for (size_t i = 0; i < spec->mapAfterChild.size(); ++i) {
bob->appendNumber(string(stream() << "mapAfterChild_" << i),
spec->mapAfterChild[i]);
@@ -352,7 +349,6 @@ void Explain::statsToBSON(const PlanStageStats& stats,
AndSortedStats* spec = static_cast<AndSortedStats*>(stats.specific.get());
if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
- bob->appendNumber("flagged", spec->flagged);
for (size_t i = 0; i < spec->failedAnd.size(); ++i) {
bob->appendNumber(string(stream() << "failedAnd_" << i), spec->failedAnd[i]);
}
@@ -405,7 +401,6 @@ void Explain::statsToBSON(const PlanStageStats& stats,
if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nWouldDelete", spec->docsDeleted);
- bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
}
} else if (STAGE_DISTINCT_SCAN == stats.stageType) {
DistinctScanStats* spec = static_cast<DistinctScanStats*>(stats.specific.get());
@@ -507,7 +502,6 @@ void Explain::statsToBSON(const PlanStageStats& stats,
bob->appendNumber("seeks", spec->seeks);
bob->appendNumber("dupsTested", spec->dupsTested);
bob->appendNumber("dupsDropped", spec->dupsDropped);
- bob->appendNumber("seenInvalidated", spec->seenInvalidated);
}
} else if (STAGE_OR == stats.stageType) {
OrStats* spec = static_cast<OrStats*>(stats.specific.get());
@@ -515,7 +509,6 @@ void Explain::statsToBSON(const PlanStageStats& stats,
if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("dupsTested", spec->dupsTested);
bob->appendNumber("dupsDropped", spec->dupsDropped);
- bob->appendNumber("recordIdsForgotten", spec->recordIdsForgotten);
}
} else if (STAGE_LIMIT == stats.stageType) {
LimitStats* spec = static_cast<LimitStats*>(stats.specific.get());
@@ -577,7 +570,6 @@ void Explain::statsToBSON(const PlanStageStats& stats,
if (verbosity >= ExplainOptions::Verbosity::kExecStats) {
bob->appendNumber("nMatched", spec->nMatched);
bob->appendNumber("nWouldModify", spec->nModified);
- bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
bob->appendBool("wouldInsert", spec->inserted);
bob->appendBool("fastmodinsert", spec->fastmodinsert);
}
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index d34f429fdab..046b6e67256 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -397,12 +397,6 @@ void PlanExecutor::reattachToOperationContext(OperationContext* opCtx) {
_currentState = kSaved;
}
-void PlanExecutor::invalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) {
- if (!isMarkedAsKilled()) {
- _root->invalidate(opCtx, dl, type);
- }
-}
-
PlanExecutor::ExecState PlanExecutor::getNext(BSONObj* objOut, RecordId* dlOut) {
Snapshotted<BSONObj> snapshotted;
ExecState state = getNextImpl(objOut ? &snapshotted : NULL, dlOut);
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index 0fe1f01fdf5..5c99759741a 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -33,7 +33,6 @@
#include "mongo/base/status.h"
#include "mongo/db/catalog/util/partitioned.h"
-#include "mongo/db/invalidation_type.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/db/storage/snapshot.h"
#include "mongo/stdx/unordered_set.h"
@@ -104,11 +103,11 @@ public:
// will handle all WriteConflictExceptions that occur while processing the query.
YIELD_AUTO,
- // This will handle WriteConflictExceptions that occur while processing the query, but
- // will not yield locks. abandonSnapshot() will be called if a WriteConflictException
- // occurs so callers must be prepared to get a new snapshot. A PlanExecutor constructed with
- // this yield policy will not be registered to receive invalidations, so the caller must
- // hold their locks continuously from construction to destruction.
+ // This will handle WriteConflictExceptions that occur while processing the query, but will
+ // not yield locks. abandonSnapshot() will be called if a WriteConflictException occurs so
+ // callers must be prepared to get a new snapshot. The caller must hold their locks
+ // continuously from construction to destruction, since a PlanExecutor with this policy will
+ // not be registered to receive kill notifications.
WRITE_CONFLICT_RETRY_ONLY,
// Use this policy if you want to disable auto-yielding, but will release locks while using
@@ -397,13 +396,6 @@ public:
void dispose(OperationContext* opCtx, CursorManager* cursorManager);
/**
- * If we're yielding locks, writes may occur to documents that we rely on to keep valid
- * state. As such, if the plan yields, it must be notified of relevant writes so that
- * we can ensure that it doesn't crash if we try to access invalid state.
- */
- void invalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type);
-
- /**
* Helper method to aid in displaying an ExecState for debug or other recreational purposes.
*/
static std::string statestr(ExecState s);
diff --git a/src/mongo/db/query/plan_ranker.h b/src/mongo/db/query/plan_ranker.h
index 5265eee2422..5b694856399 100644
--- a/src/mongo/db/query/plan_ranker.h
+++ b/src/mongo/db/query/plan_ranker.h
@@ -28,8 +28,8 @@
#pragma once
-#include <list>
#include <memory>
+#include <queue>
#include <vector>
#include "mongo/base/owned_pointer_vector.h"
@@ -77,7 +77,7 @@ struct CandidatePlan {
WorkingSet* ws; // Not owned here.
// Any results produced during the plan's execution prior to ranking are retained here.
- std::list<WorkingSetID> results;
+ std::queue<WorkingSetID> results;
bool failed;
};
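Switching 'results' from std::list to std::queue documents the access pattern: results buffered during the trial period are drained strictly FIFO. A sketch of the consuming side (the helper name is illustrative):

    WorkingSetID takeNextBufferedResult(CandidatePlan* candidate) {
        invariant(!candidate->results.empty());
        WorkingSetID id = candidate->results.front();  // Oldest result first.
        candidate->results.pop();
        return id;
    }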
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index 722f81c5e96..bfd02c37b68 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -160,7 +160,6 @@ public:
BSONArrayBuilder* arrBuilder);
private:
- friend class DeleteNotificationStage;
friend class LogOpForShardingHandler;
// Represents the states in which the cloner can be
diff --git a/src/mongo/db/storage/mobile/mobile_record_store.cpp b/src/mongo/db/storage/mobile/mobile_record_store.cpp
index 0111abb67e3..1ba8020a76f 100644
--- a/src/mongo/db/storage/mobile/mobile_record_store.cpp
+++ b/src/mongo/db/storage/mobile/mobile_record_store.cpp
@@ -112,6 +112,11 @@ public:
restore();
boost::optional<Record> rec = next();
+ if (rec && rec->id != id) {
+ // The record we found isn't the one the caller asked for.
+ return boost::none;
+ }
+
return rec;
}
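With this check, seekExact() honors its contract of returning boost::none instead of silently landing on a neighboring record. A caller-side sketch (processRecord is a stand-in):

    auto cursor = recordStore->getCursor(opCtx);
    if (boost::optional<Record> record = cursor->seekExact(id)) {
        processRecord(*record);  // Guaranteed: record->id == id.
    } else {
        // The record was deleted; there is nothing to act on.
    }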
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index c0da914f734..097a56e3cbb 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -74,6 +74,9 @@ protected:
/**
* @see RecordStore::updateRecord
+ *
+ * TODO SERVER-36662: Without MMAPv1 diskloc invalidations, this interface is no longer necessary.
+ * This callback's entire role was to issue INVALIDATION_MUTATION notifications.
*/
class UpdateNotifier {
public:
@@ -109,12 +112,12 @@ enum ValidateCmdLevel : int {
* inside that context. Any cursor acquired inside a transaction is invalid outside
* of that transaction, instead use the save and restore methods to reestablish the cursor.
*
- * Any method other than invalidate and the save methods may throw WriteConflictException. If
- * that happens, the cursor may not be used again until it has been saved and successfully
- * restored. If next() or restore() throw a WCE the cursor's position will be the same as before
- * the call (strong exception guarantee). All other methods leave the cursor in a valid state
- * but with an unspecified position (basic exception guarantee). If any exception other than
- * WCE is thrown, the cursor must be destroyed, which is guaranteed not to leak any resources.
+ * Any method other than the save method may throw WriteConflictException. If that happens, the
+ * cursor may not be used again until it has been saved and successfully restored. If next() or
+ * restore() throw a WCE the cursor's position will be the same as before the call (strong exception
+ * guarantee). All other methods leave the cursor in a valid state but with an unspecified position
+ * (basic exception guarantee). If any exception other than WCE is thrown, the cursor must be
+ * destroyed, which is guaranteed not to leak any resources.
*
* Any returned unowned BSON is only valid until the next call to any method on this
* interface.
@@ -194,15 +197,6 @@ public:
*/
virtual void reattachToOperationContext(OperationContext* opCtx) = 0;
- /**
- * Inform the cursor that this id is being invalidated. Must be called between save and restore.
- * The opCtx is that of the operation causing the invalidation, not the opCtx using the cursor.
- *
- * WARNING: Storage engines other than MMAPv1 should use the default implementation,
- * and not depend on this being called.
- */
- virtual void invalidate(OperationContext* opCtx, const RecordId& id) {}
-
//
// RecordFetchers
//
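The exception contract above implies the usual recovery shape after a WriteConflictException: save the cursor, abandon the snapshot, restore, then retry. A hedged sketch of a caller doing so:

    boost::optional<Record> nextWithRetry(OperationContext* opCtx,
                                          SeekableRecordCursor* cursor) {
        while (true) {
            try {
                return cursor->next();
            } catch (const WriteConflictException&) {
                // next() gives the strong guarantee, so the position is
                // unchanged. Re-establish the cursor before retrying.
                cursor->save();
                opCtx->recoveryUnit()->abandonSnapshot();
                if (!cursor->restore()) {
                    return boost::none;  // Cursor could not be repositioned.
                }
            }
        }
    }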
diff --git a/src/mongo/db/storage/record_store_test_recorditer.cpp b/src/mongo/db/storage/record_store_test_recorditer.cpp
index 968dfbcb760..b7ff0e54ed4 100644
--- a/src/mongo/db/storage/record_store_test_recorditer.cpp
+++ b/src/mongo/db/storage/record_store_test_recorditer.cpp
@@ -443,5 +443,46 @@ TEST(RecordStoreTestHarness, SeekAfterEofAndContinue) {
ASSERT(!cursor->next());
}
+// seekExact() must return boost::none if the RecordId does not exist.
+TEST(RecordStoreTestHarness, SeekExactForMissingRecordReturnsNone) {
+ const auto harnessHelper{newRecordStoreHarnessHelper()};
+ auto recordStore = harnessHelper->newNonCappedRecordStore();
+ ServiceContext::UniqueOperationContext opCtx{harnessHelper->newOperationContext()};
+
+ // Insert three records and remember their record ids.
+ const int nToInsert = 3;
+ RecordId recordIds[nToInsert];
+ for (int i = 0; i < nToInsert; ++i) {
+ StringBuilder sb;
+ sb << "record " << i;
+ string data = sb.str();
+
+ WriteUnitOfWork uow{opCtx.get()};
+ auto res =
+ recordStore->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, Timestamp{});
+ ASSERT_OK(res.getStatus());
+ recordIds[i] = res.getValue();
+ uow.commit();
+ }
+
+ // Delete the second record.
+ {
+ WriteUnitOfWork uow{opCtx.get()};
+ recordStore->deleteRecord(opCtx.get(), recordIds[1]);
+ uow.commit();
+ }
+
+ // Seeking to the second record should now return boost::none, for both forward and reverse
+ // cursors.
+ for (bool direction : {true, false}) {
+ auto cursor = recordStore->getCursor(opCtx.get(), direction);
+ ASSERT(!cursor->seekExact(recordIds[1]));
+ }
+
+ // Similarly, findRecord() should not find the deleted record.
+ RecordData outputData;
+ ASSERT_FALSE(recordStore->findRecord(opCtx.get(), recordIds[1], &outputData));
+}
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_repairiter.cpp b/src/mongo/db/storage/record_store_test_repairiter.cpp
index a84e7dcab4d..aa8f221a538 100644
--- a/src/mongo/db/storage/record_store_test_repairiter.cpp
+++ b/src/mongo/db/storage/record_store_test_repairiter.cpp
@@ -119,54 +119,5 @@ TEST(RecordStoreTestHarness, GetIteratorForRepairNonEmpty) {
}
}
-// Insert a single record. Create a repair iterator pointing to that single record.
-// Then invalidate the record and ensure that the repair iterator responds correctly.
-// See SERVER-16300.
-TEST(RecordStoreTestHarness, GetIteratorForRepairInvalidateSingleton) {
- const auto harnessHelper(newRecordStoreHarnessHelper());
- unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
-
- {
- ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
- ASSERT_EQ(0, rs->numRecords(opCtx.get()));
- }
-
- // Insert one record.
- RecordId idToInvalidate;
- {
- ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
- WriteUnitOfWork uow(opCtx.get());
- StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "some data", 10, Timestamp());
- ASSERT_OK(res.getStatus());
- idToInvalidate = res.getValue();
- uow.commit();
- }
-
- // Double-check that the record store has one record in it now.
- {
- ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
- ASSERT_EQ(1, rs->numRecords(opCtx.get()));
- }
-
- {
- ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
- auto cursor = rs->getCursorForRepair(opCtx.get());
- // returns NULL if getCursorForRepair is not supported
- if (!cursor) {
- return;
- }
-
- // We should be pointing at the only record in the store.
-
- // Invalidate the record we're pointing at.
- cursor->save();
- cursor->invalidate(opCtx.get(), idToInvalidate);
- cursor->restore();
-
- // Iterator should be EOF now because the only thing in the collection got deleted.
- ASSERT(!cursor->next());
- }
-}
-
} // namespace
} // namespace mongo
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 5b071ba838c..a6e399f2015 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -274,7 +274,7 @@ public:
// "next" object we would have gotten after that.
//
-class QueryStageCollscanInvalidateUpcomingObject : public QueryStageCollectionScanBase {
+class QueryStageCollscanDeleteUpcomingObject : public QueryStageCollectionScanBase {
public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
@@ -308,11 +308,6 @@ public:
// Remove recordIds[count].
scan->saveState();
- {
- WriteUnitOfWork wunit(&_opCtx);
- scan->invalidate(&_opCtx, recordIds[count], INVALIDATION_DELETION);
- wunit.commit(); // to avoid rollback of the invalidate
- }
remove(coll->docFor(&_opCtx, recordIds[count]).value());
scan->restoreState();
@@ -340,7 +335,7 @@ public:
// "next" object we would have gotten after that. But, do it in reverse!
//
-class QueryStageCollscanInvalidateUpcomingObjectBackward : public QueryStageCollectionScanBase {
+class QueryStageCollscanDeleteUpcomingObjectBackward : public QueryStageCollectionScanBase {
public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
@@ -373,11 +368,6 @@ public:
// Remove recordIds[count].
scan->saveState();
- {
- WriteUnitOfWork wunit(&_opCtx);
- scan->invalidate(&_opCtx, recordIds[count], INVALIDATION_DELETION);
- wunit.commit(); // to avoid rollback of the invalidate
- }
remove(coll->docFor(&_opCtx, recordIds[count]).value());
scan->restoreState();
@@ -412,8 +402,8 @@ public:
add<QueryStageCollscanBasicBackwardWithMatch>();
add<QueryStageCollscanObjectsInOrderForward>();
add<QueryStageCollscanObjectsInOrderBackward>();
- add<QueryStageCollscanInvalidateUpcomingObject>();
- add<QueryStageCollscanInvalidateUpcomingObjectBackward>();
+ add<QueryStageCollscanDeleteUpcomingObject>();
+ add<QueryStageCollscanDeleteUpcomingObjectBackward>();
}
};
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 2bfeb561dfa..82f5aaf8b07 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -304,10 +304,8 @@ public:
// At this point, our first interjection, we've counted _recordIds[0]
// and are about to count _recordIds[1]
WriteUnitOfWork wunit(&_opCtx);
- count_stage.invalidate(&_opCtx, _recordIds[interjection], INVALIDATION_DELETION);
remove(_recordIds[interjection]);
- count_stage.invalidate(&_opCtx, _recordIds[interjection + 1], INVALIDATION_DELETION);
remove(_recordIds[interjection + 1]);
wunit.commit();
}
@@ -328,11 +326,9 @@ public:
// At the point which this is called we are in between the first and second record
void interject(CountStage& count_stage, int interjection) {
if (interjection == 0) {
- count_stage.invalidate(&_opCtx, _recordIds[0], INVALIDATION_MUTATION);
OID id1 = _coll->docFor(&_opCtx, _recordIds[0]).value().getField("_id").OID();
update(_recordIds[0], BSON("_id" << id1 << "x" << 100));
- count_stage.invalidate(&_opCtx, _recordIds[1], INVALIDATION_MUTATION);
OID id2 = _coll->docFor(&_opCtx, _recordIds[1]).value().getField("_id").OID();
update(_recordIds[1], BSON("_id" << id2 << "x" << 100));
}
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index 987eb822f3f..7d604e19a7e 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -122,12 +122,9 @@ private:
DBDirectClient _client;
};
-//
-// Test invalidation for the delete stage. Use the delete stage to delete some objects
-// retrieved by a collscan, then invalidate the upcoming object, then expect the delete stage to
-// skip over it and successfully delete the rest.
-//
-class QueryStageDeleteInvalidateUpcomingObject : public QueryStageDeleteBase {
+// Use the delete stage to delete some objects retrieved by a collscan, then separately delete the
+// upcoming object. We expect the delete stage to skip over it and successfully continue.
+class QueryStageDeleteUpcomingObjectWasDeleted : public QueryStageDeleteBase {
public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
@@ -167,11 +164,6 @@ public:
// Remove recordIds[targetDocIndex];
deleteStage.saveState();
- {
- WriteUnitOfWork wunit(&_opCtx);
- deleteStage.invalidate(&_opCtx, recordIds[targetDocIndex], INVALIDATION_DELETION);
- wunit.commit();
- }
BSONObj targetDoc = coll->docFor(&_opCtx, recordIds[targetDocIndex]).value();
ASSERT(!targetDoc.isEmpty());
remove(targetDoc);
@@ -255,65 +247,14 @@ public:
}
};
-/**
- * Test that a delete stage which has not been asked to return the deleted document will skip a
- * WorkingSetMember that has been returned from the child in the OWNED_OBJ state. A WorkingSetMember
- * in the OWNED_OBJ state implies there was a conflict during execution, so this WorkingSetMember
- * should be skipped.
- */
-class QueryStageDeleteSkipOwnedObjects : public QueryStageDeleteBase {
-public:
- void run() {
- // Various variables we'll need.
- dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
- Collection* coll = ctx.getCollection();
- const BSONObj query = BSONObj();
- const auto ws = make_unique<WorkingSet>();
- const unique_ptr<CanonicalQuery> cq(canonicalize(query));
-
- // Configure a QueuedDataStage to pass an OWNED_OBJ to the delete stage.
- auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());
- {
- WorkingSetID id = ws->allocate();
- WorkingSetMember* member = ws->get(id);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), fromjson("{x: 1}"));
- member->transitionToOwnedObj();
- qds->pushBack(id);
- }
-
- // Configure the delete.
- DeleteStageParams deleteParams;
- deleteParams.isMulti = false;
- deleteParams.canonicalQuery = cq.get();
-
- const auto deleteStage =
- make_unique<DeleteStage>(&_opCtx, deleteParams, ws.get(), coll, qds.release());
- const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage->getSpecificStats());
-
- // Call work, passing the set up member to the delete stage.
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = deleteStage->work(&id);
-
- // Should return NEED_TIME, not deleting anything.
- ASSERT_EQUALS(PlanStage::NEED_TIME, state);
- ASSERT_EQUALS(stats->docsDeleted, 0U);
-
- id = WorkingSet::INVALID_ID;
- state = deleteStage->work(&id);
- ASSERT_EQUALS(PlanStage::IS_EOF, state);
- }
-};
-
-
class All : public Suite {
public:
All() : Suite("query_stage_delete") {}
void setupTests() {
// Stage-specific tests below.
- add<QueryStageDeleteInvalidateUpcomingObject>();
+ add<QueryStageDeleteUpcomingObjectWasDeleted>();
add<QueryStageDeleteReturnOldDoc>();
- add<QueryStageDeleteSkipOwnedObjects>();
}
};
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 4e4d7da79eb..c50df0cbfd0 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/exec/index_scan.h"
#include "mongo/db/exec/merge_sort.h"
#include "mongo/db/exec/plan_stage.h"
+#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/json.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/db/query/plan_executor.h"
@@ -94,6 +95,10 @@ public:
_client.remove(ns(), obj);
}
+ void update(const BSONObj& predicate, const BSONObj& update) {
+ _client.update(ns(), predicate, update);
+ }
+
void getRecordIds(set<RecordId>* out, Collection* coll) {
auto cursor = coll->getCursor(&_opCtx);
while (auto record = cursor->next()) {
@@ -506,8 +511,8 @@ public:
}
};
-// Invalidation mid-run
-class QueryStageMergeSortInvalidation : public QueryStageMergeSortTestBase {
+// Document is deleted mid-run.
+class QueryStageMergeSortDeletedDocument : public QueryStageMergeSortTestBase {
public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
@@ -565,12 +570,15 @@ public:
++it;
}
- // Invalidate recordIds[11]. Should force a fetch and return the deleted document.
+ // Delete recordIds[11]. The deleted document should be buffered inside the SORT_MERGE
+ // stage, and therefore should still be returned.
ms->saveState();
- ms->invalidate(&_opCtx, *it, INVALIDATION_DELETION);
+ remove(BSON(std::string(1u, 'a' + count) << 1));
ms->restoreState();
- // Make sure recordIds[11] was fetched for us.
+ // Make sure recordIds[11] is returned as expected. We expect the corresponding working set
+ // member to remain in RID_AND_IDX state. It should have been marked as "suspicious", since
+ // this is an index key WSM that survived a yield.
{
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState status;
@@ -579,8 +587,9 @@ public:
} while (PlanStage::ADVANCED != status);
WorkingSetMember* member = ws.get(id);
- ASSERT(!member->hasRecordId());
- ASSERT(member->hasObj());
+ ASSERT_EQ(member->getState(), WorkingSetMember::RID_AND_IDX);
+ ASSERT(member->hasRecordId());
+ ASSERT(!member->hasObj());
string index(1, 'a' + count);
BSONElement elt;
ASSERT_TRUE(member->getFieldDotted(index, &elt));
@@ -588,6 +597,10 @@ public:
ASSERT(member->getFieldDotted("foo", &elt));
ASSERT_EQUALS(count, elt.numberInt());
+ // An attempt to fetch the WSM should show that the key is no longer present in the
+ // index.
+ ASSERT_FALSE(WorkingSetCommon::fetch(&_opCtx, &ws, id, coll->getCursor(&_opCtx)));
+
++it;
++count;
}
@@ -616,7 +629,7 @@ public:
// Test that if a WSM buffered inside the merge sort stage gets updated, we return the document and
// then correctly dedup if we see the same RecordId again.
-class QueryStageMergeSortInvalidationMutationDedup : public QueryStageMergeSortTestBase {
+class QueryStageMergeSortConcurrentUpdateDedup : public QueryStageMergeSortTestBase {
public:
void run() {
dbtests::WriteContextForTests ctx(&_opCtx, ns());
@@ -672,12 +685,17 @@ public:
ASSERT_BSONOBJ_EQ(member->obj.value(), BSON("_id" << 4 << "a" << 4));
++it;
- // Doc {a: 5} gets invalidated by an update.
- ms->invalidate(&_opCtx, *it, INVALIDATION_MUTATION);
+ // Update doc {a: 5} such that the postimage will no longer match the query.
+ ms->saveState();
+ update(BSON("a" << 5), BSON("$set" << BSON("a" << 15)));
+ ms->restoreState();
- // Invalidated doc {a: 5} should still get returned.
+ // The updated doc {a: 5} should still get returned. We expect an RID_AND_OBJ working set
+ // member with an owned BSONObj.
member = getNextResult(&ws, ms.get());
- ASSERT_EQ(member->getState(), WorkingSetMember::OWNED_OBJ);
+ ASSERT_EQ(member->getState(), WorkingSetMember::RID_AND_OBJ);
+ ASSERT(member->hasObj());
+ ASSERT(member->obj.value().isOwned());
ASSERT_BSONOBJ_EQ(member->obj.value(), BSON("_id" << 5 << "a" << 5));
++it;
@@ -851,8 +869,8 @@ public:
add<QueryStageMergeSortPrefixIndexReverse>();
add<QueryStageMergeSortOneStageEOF>();
add<QueryStageMergeSortManyShort>();
- add<QueryStageMergeSortInvalidation>();
- add<QueryStageMergeSortInvalidationMutationDedup>();
+ add<QueryStageMergeSortDeletedDocument>();
+ add<QueryStageMergeSortConcurrentUpdateDedup>();
add<QueryStageMergeSortStringsWithNullCollation>();
add<QueryStageMergeSortStringsRespectsCollation>();
}
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index ea8ef9e45ec..750e63a4444 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -103,8 +103,8 @@ public:
}
_children.push_back(std::move(queuedStage));
- return StatusWith<CoveredInterval*>(new CoveredInterval(
- _children.back().get(), true, interval.min, interval.max, lastInterval));
+ return StatusWith<CoveredInterval*>(
+ new CoveredInterval(_children.back().get(), interval.min, interval.max, lastInterval));
}
StatusWith<double> computeDistance(WorkingSetMember* member) final {
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index 7deed7fea80..5ecd150af12 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -252,10 +252,9 @@ public:
};
/**
- * Test receipt of an invalidation: case in which the document about to updated
- * is deleted.
+ * Test the case in which the document about to be updated is deleted.
*/
-class QueryStageUpdateSkipInvalidatedDoc : public QueryStageUpdateBase {
+class QueryStageUpdateSkipDeletedDoc : public QueryStageUpdateBase {
public:
void run() {
// Run the update.
@@ -327,11 +326,6 @@ public:
// Remove recordIds[targetDocIndex];
updateStage->saveState();
- {
- WriteUnitOfWork wunit(&_opCtx);
- updateStage->invalidate(&_opCtx, recordIds[targetDocIndex], INVALIDATION_DELETION);
- wunit.commit();
- }
BSONObj targetDoc = coll->docFor(&_opCtx, recordIds[targetDocIndex]).value();
ASSERT(!targetDoc.isEmpty());
remove(targetDoc);
@@ -553,70 +547,6 @@ public:
}
};
-/**
- * Test that an update stage which has not been asked to return any version of the updated document
- * will skip a WorkingSetMember that has been returned from the child in the OWNED_OBJ state. A
- * WorkingSetMember in the OWNED_OBJ state implies there was a conflict during execution, so this
- * WorkingSetMember should be skipped.
- */
-class QueryStageUpdateSkipOwnedObjects : public QueryStageUpdateBase {
-public:
- void run() {
- // Various variables we'll need.
- dbtests::WriteContextForTests ctx(&_opCtx, nss.ns());
- OpDebug* opDebug = &CurOp::get(_opCtx)->debug();
- Collection* coll = ctx.getCollection();
- UpdateLifecycleImpl updateLifecycle(nss);
- UpdateRequest request(nss);
- const CollatorInterface* collator = nullptr;
- UpdateDriver driver(new ExpressionContext(&_opCtx, collator));
- const BSONObj query = BSONObj();
- const auto ws = make_unique<WorkingSet>();
- const unique_ptr<CanonicalQuery> cq(canonicalize(query));
-
- // Populate the request.
- request.setQuery(query);
- request.setUpdates(fromjson("{$set: {x: 0}}"));
- request.setSort(BSONObj());
- request.setMulti(false);
- request.setLifecycle(&updateLifecycle);
-
- const std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> arrayFilters;
-
- ASSERT_DOES_NOT_THROW(driver.parse(request.getUpdates(), arrayFilters, request.isMulti()));
-
- // Configure a QueuedDataStage to pass an OWNED_OBJ to the update stage.
- auto qds = make_unique<QueuedDataStage>(&_opCtx, ws.get());
- {
- WorkingSetID id = ws->allocate();
- WorkingSetMember* member = ws->get(id);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), fromjson("{x: 1}"));
- member->transitionToOwnedObj();
- qds->pushBack(id);
- }
-
- // Configure the update.
- UpdateStageParams updateParams(&request, &driver, opDebug);
- updateParams.canonicalQuery = cq.get();
-
- const auto updateStage =
- make_unique<UpdateStage>(&_opCtx, updateParams, ws.get(), coll, qds.release());
- const UpdateStats* stats = static_cast<const UpdateStats*>(updateStage->getSpecificStats());
-
- // Call work, passing the set up member to the update stage.
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = updateStage->work(&id);
-
- // Should return NEED_TIME, not modifying anything.
- ASSERT_EQUALS(PlanStage::NEED_TIME, state);
- ASSERT_EQUALS(stats->nModified, 0U);
-
- id = WorkingSet::INVALID_ID;
- state = updateStage->work(&id);
- ASSERT_EQUALS(PlanStage::IS_EOF, state);
- }
-};
-
class All : public Suite {
public:
All() : Suite("query_stage_update") {}
@@ -624,10 +554,9 @@ public:
void setupTests() {
// Stage-specific tests below.
add<QueryStageUpdateUpsertEmptyColl>();
- add<QueryStageUpdateSkipInvalidatedDoc>();
+ add<QueryStageUpdateSkipDeletedDoc>();
add<QueryStageUpdateReturnOldDoc>();
add<QueryStageUpdateReturnNewDoc>();
- add<QueryStageUpdateSkipOwnedObjects>();
}
};