diff options
author | Lingzhi Deng <lingzhi.deng@mongodb.com> | 2021-04-28 22:47:50 +0000 |
---|---|---|
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2021-04-28 23:23:13 +0000 |
commit | 136fa52193c342038b3fa35152fa1ed3dee4ee87 (patch) | |
tree | b2054bae1f5c5daa94666ce0306ad6b68b7df7b9 /src/mongo/db/repl/oplog_applier_impl.cpp | |
parent | 00d3ec0d3a9d7c4077148f528bb1f7293fd1b238 (diff) | |
download | mongo-136fa52193c342038b3fa35152fa1ed3dee4ee87.tar.gz |
SERVER-56054: Change minThreads value for replication writer thread pool to 0
Diffstat (limited to 'src/mongo/db/repl/oplog_applier_impl.cpp')
-rw-r--r-- | src/mongo/db/repl/oplog_applier_impl.cpp | 11 |
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index 93072539fba..7604af715dc 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -410,7 +410,7 @@ void scheduleWritesToOplog(OperationContext* opCtx,
     // setup/teardown overhead across many writes.
     const size_t kMinOplogEntriesPerThread = 16;
     const bool enoughToMultiThread =
-        ops.size() >= kMinOplogEntriesPerThread * writerPool->getStats().numThreads;
+        ops.size() >= kMinOplogEntriesPerThread * writerPool->getStats().options.maxThreads;
 
     // Storage engines support parallel writes to the oplog because they are required to ensure that
     // oplog entries are ordered correctly, even if inserted out-of-order.
@@ -420,7 +420,7 @@ void scheduleWritesToOplog(OperationContext* opCtx,
     }
 
-    const size_t numOplogThreads = writerPool->getStats().numThreads;
+    const size_t numOplogThreads = writerPool->getStats().options.maxThreads;
     const size_t numOpsPerThread = ops.size() / numOplogThreads;
     for (size_t thread = 0; thread < numOplogThreads; thread++) {
         size_t begin = thread * numOpsPerThread;
@@ -453,7 +453,7 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
     // Increment the batch size stat.
    oplogApplicationBatchSize.increment(ops.size());
 
-    std::vector<WorkerMultikeyPathInfo> multikeyVector(_writerPool->getStats().numThreads);
+    std::vector<WorkerMultikeyPathInfo> multikeyVector(_writerPool->getStats().options.maxThreads);
     {
         // Each node records cumulative batch application stats for itself using this timer.
         TimerHolder timer(&applyBatchStats);
@@ -479,7 +479,7 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
         std::vector<std::vector<OplogEntry>> derivedOps;
 
         std::vector<std::vector<const OplogEntry*>> writerVectors(
-            _writerPool->getStats().numThreads);
+            _writerPool->getStats().options.maxThreads);
         fillWriterVectors(opCtx, &ops, &writerVectors, &derivedOps);
 
         // Wait for writes to finish before applying ops.
@@ -501,7 +501,8 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
         }
 
         {
-            std::vector<Status> statusVector(_writerPool->getStats().numThreads, Status::OK());
+            std::vector<Status> statusVector(_writerPool->getStats().options.maxThreads,
+                                             Status::OK());
 
             // Doles out all the work to the writer pool threads. writerVectors is not modified,
             // but applyOplogBatchPerWorker will modify the vectors that it contains.