Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp  35
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.h     8
2 files changed, 16 insertions, 27 deletions
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index ec4588624f6..28377ec41a0 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -677,7 +677,8 @@ void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromIndexScan(OperationCon
Milliseconds(internalQueryExecYieldPeriodMS.load()));
if (!_jumboChunkCloneState->clonerExec) {
- auto exec = uassertStatusOK(_getIndexScanExecutor(opCtx, collection));
+ auto exec = uassertStatusOK(_getIndexScanExecutor(
+ opCtx, collection, InternalPlanner::IndexScanOptions::IXSCAN_FETCH));
_jumboChunkCloneState->clonerExec = std::move(exec);
} else {
_jumboChunkCloneState->clonerExec->reattachToOperationContext(opCtx);
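
(Aside on the hunk above: the jumbo-chunk cloner builds its plan executor lazily on the first batch and only re-binds the existing one to each later operation context, which is why the new IXSCAN_FETCH argument is threaded through this one construction site. A minimal stand-alone sketch of that create-once/reattach shape; the types here are simplified stand-ins, not the real mongo classes.)

    #include <cassert>
    #include <memory>

    struct OperationContext {};

    // Simplified stand-in for a PlanExecutor that can be detached from one
    // operation context and reattached to the next.
    struct FakePlanExecutor {
        OperationContext* boundOpCtx = nullptr;
        void reattachToOperationContext(OperationContext* opCtx) { boundOpCtx = opCtx; }
        void detachFromOperationContext() { boundOpCtx = nullptr; }
    };

    struct JumboChunkCloneState {
        std::unique_ptr<FakePlanExecutor> clonerExec;  // survives across batches
    };

    // Called at the start of every batch: build the executor once, afterwards
    // only re-bind the long-lived executor to the current operation context.
    void prepareExecutorForBatch(OperationContext* opCtx, JumboChunkCloneState* state) {
        if (!state->clonerExec) {
            state->clonerExec = std::make_unique<FakePlanExecutor>();
            state->clonerExec->boundOpCtx = opCtx;  // freshly built, already bound
        } else {
            state->clonerExec->reattachToOperationContext(opCtx);
        }
        assert(state->clonerExec->boundOpCtx == opCtx);
    }
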
@@ -685,39 +686,24 @@ void MigrationChunkClonerSourceLegacy::_nextCloneBatchFromIndexScan(OperationCon
}
BSONObj obj;
- RecordId recordId;
PlanExecutor::ExecState execState;
-
while (PlanExecutor::ADVANCED ==
- (execState = _jumboChunkCloneState->clonerExec->getNext(
- &obj, _jumboChunkCloneState->stashedRecordId ? nullptr : &recordId))) {
+ (execState = _jumboChunkCloneState->clonerExec->getNext(&obj, nullptr))) {
stdx::unique_lock<Latch> lk(_mutex);
_jumboChunkCloneState->clonerState = execState;
lk.unlock();
opCtx->checkForInterrupt();
-
// Use the builder size instead of accumulating the document sizes directly so
// that we take into consideration the overhead of BSONArray indices.
if (arrBuilder->arrSize() &&
(arrBuilder->len() + obj.objsize() + 1024) > BSONObjMaxUserSize) {
_jumboChunkCloneState->clonerExec->enqueue(obj);
-
- // Stash the recordId we just read to add to the next batch.
- if (!recordId.isNull()) {
- invariant(!_jumboChunkCloneState->stashedRecordId);
- _jumboChunkCloneState->stashedRecordId = std::move(recordId);
- }
-
break;
}
- Snapshotted<BSONObj> doc;
- invariant(collection->findDoc(
- opCtx, _jumboChunkCloneState->stashedRecordId.value_or(recordId), &doc));
- arrBuilder->append(doc.value());
- _jumboChunkCloneState->stashedRecordId = boost::none;
+ arrBuilder->append(obj);
lk.lock();
_jumboChunkCloneState->docsCloned++;
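
(Aside on the hunk above: this loop is the heart of the change. With IXSCAN_FETCH the executor's getNext() already yields the full document, so the old stashedRecordId / findDoc round-trip is gone and each document goes straight into the batch. Below is a rough, self-contained sketch of the batch-filling rule; BSONArrayBuilder and the executor are replaced by simplified stand-ins, and only the 16MB limit and the 1024-byte pad come from the diff.)

    #include <deque>
    #include <string>
    #include <vector>

    constexpr int kBSONObjMaxUserSize = 16 * 1024 * 1024;  // 16MB BSON user-size limit

    struct FakeDoc {
        std::string bytes;
        int objsize() const { return static_cast<int>(bytes.size()); }
    };

    // Stand-in for BSONArrayBuilder: len() is the serialized size of the whole
    // array so far, which already includes the per-element index overhead.
    struct FakeArrayBuilder {
        std::vector<FakeDoc> docs;
        int totalLen = 5;  // rough framing bytes of an empty BSON array
        int arrSize() const { return static_cast<int>(docs.size()); }
        int len() const { return totalLen; }
        void append(const FakeDoc& d) {
            docs.push_back(d);
            totalLen += d.objsize() + 8;  // crude per-element overhead
        }
    };

    // Fill one batch from 'source'. A document that would push the batch past
    // the size limit is put back at the front (the role of
    // clonerExec->enqueue(obj)) so the next batch starts with it. An empty
    // batch always accepts at least one document, mirroring the arrSize() check.
    void fillBatch(std::deque<FakeDoc>& source, FakeArrayBuilder* arrBuilder) {
        while (!source.empty()) {
            FakeDoc obj = source.front();
            source.pop_front();
            if (arrBuilder->arrSize() &&
                (arrBuilder->len() + obj.objsize() + 1024) > kBSONObjMaxUserSize) {
                source.push_front(obj);
                break;
            }
            arrBuilder->append(obj);
        }
    }
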
@@ -885,8 +871,10 @@ StatusWith<BSONObj> MigrationChunkClonerSourceLegacy::_callRecipient(const BSONO
}
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>>
-MigrationChunkClonerSourceLegacy::_getIndexScanExecutor(OperationContext* opCtx,
- Collection* const collection) {
+MigrationChunkClonerSourceLegacy::_getIndexScanExecutor(
+ OperationContext* opCtx,
+ Collection* const collection,
+ InternalPlanner::IndexScanOptions scanOption) {
// Allow multiKey based on the invariant that shard keys must be single-valued. Therefore, any
// multi-key index prefixed by shard key cannot be multikey over the shard key fields.
const IndexDescriptor* idx =
@@ -913,7 +901,9 @@ MigrationChunkClonerSourceLegacy::_getIndexScanExecutor(OperationContext* opCtx,
min,
max,
BoundInclusion::kIncludeStartKeyOnly,
- PlanExecutor::YIELD_AUTO);
+ PlanExecutor::YIELD_AUTO,
+ InternalPlanner::Direction::FORWARD,
+ scanOption);
}
Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opCtx) {
@@ -925,7 +915,8 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opC
str::stream() << "Collection " << _args.getNss().ns() << " does not exist."};
}
- auto swExec = _getIndexScanExecutor(opCtx, collection);
+ auto swExec =
+ _getIndexScanExecutor(opCtx, collection, InternalPlanner::IndexScanOptions::IXSCAN_DEFAULT);
if (!swExec.isOK()) {
return swExec.getStatus();
}
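
(Aside on the two hunks above: both call sites now share _getIndexScanExecutor and differ only in the scan option. The jumbo-chunk clone path passes IXSCAN_FETCH, which as I understand it asks InternalPlanner to also fetch the document each index entry points to, while _storeCurrentLocs keeps IXSCAN_DEFAULT since index entries suffice for recording record ids. A simplified sketch of that single-factory/two-options shape, with illustrative types rather than the real InternalPlanner interface.)

    enum class IndexScanOption { kDefault, kFetch };

    struct ScanPlan {
        bool fetchFullDocuments;  // true: yield whole documents, false: index entries only
    };

    // One factory serves both callers; only the option differs.
    ScanPlan makeShardKeyIndexScan(IndexScanOption option) {
        return ScanPlan{option == IndexScanOption::kFetch};
    }

    // Jumbo-chunk cloning needs the documents themselves.
    ScanPlan cloneScan() { return makeShardKeyIndexScan(IndexScanOption::kFetch); }

    // Recording the chunk's current locations only needs the index entries.
    ScanPlan storeLocsScan() { return makeShardKeyIndexScan(IndexScanOption::kDefault); }
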
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index 7990615e38f..9d438d20eb0 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -224,7 +224,9 @@ private:
StatusWith<BSONObj> _callRecipient(const BSONObj& cmdObj);
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> _getIndexScanExecutor(
- OperationContext* opCtx, Collection* const collection);
+ OperationContext* opCtx,
+ Collection* const collection,
+ InternalPlanner::IndexScanOptions scanOption);
void _nextCloneBatchFromIndexScan(OperationContext* opCtx,
Collection* collection,
@@ -387,10 +389,6 @@ private:
// The current state of 'clonerExec'.
PlanExecutor::ExecState clonerState;
- // RecordId of the last doc read in by 'clonerExec' if collection scan yields during
- // cloning.
- boost::optional<RecordId> stashedRecordId;
-
// Number docs in jumbo chunk cloned so far
int docsCloned = 0;
};
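
(Net effect on the state the cloner keeps between batches: stashedRecordId goes away entirely. Roughly, the remaining struct looks like the sketch below; member names follow the diff, while the executor type is a simplified stand-in.)

    #include <memory>

    struct FakePlanExecutor {
        enum ExecState { ADVANCED, IS_EOF };
    };

    struct JumboChunkCloneState {
        // Scan executor that survives across cloning batches (created lazily on
        // the first batch, reattached to each new operation context after that).
        std::unique_ptr<FakePlanExecutor> clonerExec;

        // The current state of 'clonerExec'.
        FakePlanExecutor::ExecState clonerState = FakePlanExecutor::IS_EOF;

        // Number of docs in the jumbo chunk cloned so far. No stashedRecordId is
        // needed any more: with IXSCAN_FETCH the executor hands back whole
        // documents, so nothing has to be carried over between batches by id.
        int docsCloned = 0;
    };
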