diff options
author:    Lingzhi Deng <lingzhi.deng@mongodb.com>  2021-04-28 22:47:50 +0000
committer: Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-06-09 00:36:54 +0000
commit:    56eeca07155e9d5db36934fdbdc36cc118e2c519 (patch)
tree:      638b62c3486766846bae82942a419b79bf5a970a /src
parent:    1b0f491b901c901a7d4dcc341eef9e0a9f8613d5 (diff)
download:  mongo-56eeca07155e9d5db36934fdbdc36cc118e2c519.tar.gz
SERVER-56054: Change minThreads value for replication writer thread pool to 0
(cherry picked from commit 136fa52193c342038b3fa35152fa1ed3dee4ee87)
(cherry picked from commit 4935fe56743814ffc39e9f7d9eebb3648dc2846f)
Diffstat (limited to 'src')
src/mongo/db/repl/oplog_applier.cpp          |  8
src/mongo/db/repl/repl_server_parameters.idl | 10
src/mongo/db/repl/sync_tail.cpp              | 12
3 files changed, 24 insertions, 6 deletions
diff --git a/src/mongo/db/repl/oplog_applier.cpp b/src/mongo/db/repl/oplog_applier.cpp index f72639fdeba..34be41fda71 100644 --- a/src/mongo/db/repl/oplog_applier.cpp +++ b/src/mongo/db/repl/oplog_applier.cpp @@ -49,6 +49,10 @@ using CallbackArgs = executor::TaskExecutor::CallbackArgs; // static std::unique_ptr<ThreadPool> OplogApplier::makeWriterPool() { + if (replWriterThreadCount < replWriterMinThreadCount) { + severe() << "replWriterMinThreadCount must be less than or equal to replWriterThreadCount."; + fassertFailedNoTrace(5605400); + } return makeWriterPool(replWriterThreadCount); } @@ -57,7 +61,9 @@ std::unique_ptr<ThreadPool> OplogApplier::makeWriterPool(int threadCount) { ThreadPool::Options options; options.threadNamePrefix = "repl-writer-worker-"; options.poolName = "repl writer worker Pool"; - options.maxThreads = options.minThreads = static_cast<size_t>(threadCount); + options.minThreads = + replWriterMinThreadCount < threadCount ? replWriterMinThreadCount : threadCount; + options.maxThreads = static_cast<size_t>(threadCount); options.onCreateThread = [](const std::string&) { Client::initThread(getThreadName()); AuthorizationSession::get(cc())->grantInternalAuthorization(&cc()); diff --git a/src/mongo/db/repl/repl_server_parameters.idl b/src/mongo/db/repl/repl_server_parameters.idl index 1dd3b2e4143..c97babde6ae 100644 --- a/src/mongo/db/repl/repl_server_parameters.idl +++ b/src/mongo/db/repl/repl_server_parameters.idl @@ -227,6 +227,16 @@ server_parameters: gte: 1 lte: 256 + replWriterMinThreadCount: + description: The minimum number of threads in the thread pool used to apply the oplog + set_at: startup + cpp_vartype: int + cpp_varname: replWriterMinThreadCount + default: 0 + validator: + gte: 0 + lte: 256 + replBatchLimitOperations: description: The maximum number of operations to apply in a single batch set_at: [ startup, runtime ] diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp index 1b9e71a0c82..922f3c232d3 100644 
--- a/src/mongo/db/repl/sync_tail.cpp +++ b/src/mongo/db/repl/sync_tail.cpp @@ -456,7 +456,7 @@ void scheduleWritesToOplog(OperationContext* opCtx, // setup/teardown overhead across many writes. const size_t kMinOplogEntriesPerThread = 16; const bool enoughToMultiThread = - ops.size() >= kMinOplogEntriesPerThread * threadPool->getStats().numThreads; + ops.size() >= kMinOplogEntriesPerThread * threadPool->getStats().options.maxThreads; // Only doc-locking engines support parallel writes to the oplog because they are required to // ensure that oplog entries are ordered correctly, even if inserted out-of-order. Additionally, @@ -470,7 +470,7 @@ void scheduleWritesToOplog(OperationContext* opCtx, } - const size_t numOplogThreads = threadPool->getStats().numThreads; + const size_t numOplogThreads = threadPool->getStats().options.maxThreads; const size_t numOpsPerThread = ops.size() / numOplogThreads; for (size_t thread = 0; thread < numOplogThreads; thread++) { size_t begin = thread * numOpsPerThread; @@ -1354,7 +1354,7 @@ StatusWith<OpTime> SyncTail::multiApply(OperationContext* opCtx, MultiApplier::O // Increment the batch size stat. oplogApplicationBatchSize.increment(ops.size()); - std::vector<WorkerMultikeyPathInfo> multikeyVector(_writerPool->getStats().numThreads); + std::vector<WorkerMultikeyPathInfo> multikeyVector(_writerPool->getStats().options.maxThreads); { // Each node records cumulative batch application stats for itself using this timer. TimerHolder timer(&applyBatchStats); @@ -1378,7 +1378,8 @@ StatusWith<OpTime> SyncTail::multiApply(OperationContext* opCtx, MultiApplier::O // and create a pseudo oplog. 
std::vector<MultiApplier::Operations> derivedOps; - std::vector<MultiApplier::OperationPtrs> writerVectors(_writerPool->getStats().numThreads); + std::vector<MultiApplier::OperationPtrs> writerVectors( + _writerPool->getStats().options.maxThreads); fillWriterVectors(opCtx, &ops, &writerVectors, &derivedOps); // Wait for writes to finish before applying ops. @@ -1404,7 +1405,8 @@ StatusWith<OpTime> SyncTail::multiApply(OperationContext* opCtx, MultiApplier::O } { - std::vector<Status> statusVector(_writerPool->getStats().numThreads, Status::OK()); + std::vector<Status> statusVector(_writerPool->getStats().options.maxThreads, + Status::OK()); _applyOps(writerVectors, &statusVector, &multikeyVector, isDataConsistent); _writerPool->waitForIdle(); |