author      Josef Ahmad <josef.ahmad@mongodb.com>            2021-12-17 22:42:13 +0000
committer   Evergreen Agent <no-reply@evergreen.mongodb.com> 2021-12-21 17:35:11 +0000
commit      049748570b5588ebe2f001bd2f0d8dc3278c9d2c (patch)
tree        c2074e5b84f78a8b74563948ef7ca2b82a95f156
parent      ba133ee0044dad667cbdb697ff89958b6fce1957 (diff)
download    mongo-049748570b5588ebe2f001bd2f0d8dc3278c9d2c.tar.gz
SERVER-62023 Add dbCheck currentOp progress meter
(cherry picked from commit e386d65719fe60060d079f06e72ad0873f465cfb)
 src/mongo/db/commands/dbcheck.cpp | 32 ++++++++++++++++++++--------
 1 file changed, 24 insertions(+), 8 deletions(-)
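
The patch hoists OperationContext creation out of _runBatch() so a single opCtx (and therefore a single CurOp) spans the whole collection scan, then installs a progress meter on that CurOp sized by the collection's record count. Each completed batch bumps the meter by the number of documents hashed, and the meter is marked finished when the scan ends. The snippet below is a minimal, self-contained sketch of that flow, not the MongoDB ProgressMeter/CurOp API; SimpleProgressMeter, the namespace string, and the batch sizes are invented for illustration.

    // Minimal standalone sketch (assumed names, not MongoDB code): size the meter
    // from the collection's record count, bump it once per batch, finish at the end.
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <utility>

    class SimpleProgressMeter {
    public:
        SimpleProgressMeter(std::string msg, std::uint64_t total)
            : _msg(std::move(msg)), _total(total) {}

        // Analogue of progress.get()->hit(n) in the patch.
        void hit(std::uint64_t n) {
            _done += n;
            std::cout << _msg << ": " << _done << "/" << _total << "\n";
        }

        // Analogue of progress.finished() in the patch.
        void finished() { std::cout << _msg << ": done\n"; }

    private:
        std::string _msg;
        std::uint64_t _done = 0;
        std::uint64_t _total;
    };

    int main() {
        // Hypothetical numbers: a 1000-record collection scanned in 4 batches of 250.
        SimpleProgressMeter progress("Scanning namespace test.coll", 1000);
        for (int batch = 0; batch < 4; ++batch) {
            progress.hit(250);  // one hit() per completed dbCheck batch
        }
        progress.finished();
        return 0;
    }

With the real meter attached to CurOp, the "Scanning namespace ..." message and the done/total counts become visible to anyone inspecting the running operation (e.g. via currentOp), which is the point of the change.
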
diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp
index 98f7d6ef9f7..1cd320d9860 100644
--- a/src/mongo/db/commands/dbcheck.cpp
+++ b/src/mongo/db/commands/dbcheck.cpp
@@ -198,9 +198,12 @@ protected:
tc.get()->setSystemOperationKillable(lk);
}
+ auto uniqueOpCtx = tc->makeOperationContext();
+ auto opCtx = uniqueOpCtx.get();
+
for (const auto& coll : *_run) {
try {
- _doCollection(coll);
+ _doCollection(opCtx, coll);
} catch (const DBException& e) {
auto logEntry = dbCheckErrorHealthLogEntry(
coll.nss, "dbCheck failed", OplogEntriesEnum::Batch, e.toStatus());
@@ -216,7 +219,7 @@ protected:
}
private:
- void _doCollection(const DbCheckCollectionInfo& info) {
+ void _doCollection(OperationContext* opCtx, const DbCheckCollectionInfo& info) {
// The collection was confirmed as existing in singleCollectionRun().
// runBatch() will handle the case of the collection having been dropped since then.
@@ -224,6 +227,18 @@ private:
return;
}
+ const std::string curOpMessage = "Scanning namespace " + info.nss.toString();
+ ProgressMeterHolder progress;
+ {
+ AutoGetCollection autoColl(opCtx, info.nss, MODE_IS);
+ auto coll = autoColl.getCollection();
+ if (coll) {
+ stdx::unique_lock<Client> lk(*opCtx->getClient());
+ progress.set(CurOp::get(opCtx)->setProgress_inlock(StringData(curOpMessage),
+ coll->numRecords(opCtx)));
+ }
+ }
+
// Parameters for the hasher.
auto start = info.start;
bool reachedEnd = false;
@@ -246,7 +261,8 @@ private:
docsInCurrentInterval = 0;
}
- auto result = _runBatch(info, start, info.maxDocsPerBatch, info.maxBytesPerBatch);
+ auto result =
+ _runBatch(opCtx, info, start, info.maxDocsPerBatch, info.maxBytesPerBatch);
if (_done) {
return;
@@ -306,6 +322,7 @@ private:
totalDocsSeen += stats.nDocs;
totalBytesSeen += stats.nBytes;
docsInCurrentInterval += stats.nDocs;
+ progress.get()->hit(stats.nDocs);
// Check if we've exceeded any limits.
bool reachedLast = stats.lastKey >= info.end;
@@ -321,6 +338,8 @@ private:
stdx::this_thread::sleep_for(timesExceeded * 1s - (Clock::now() - lastStart));
}
} while (!reachedEnd);
+
+ progress.finished();
}
/**
@@ -339,14 +358,11 @@ private:
std::string _dbName;
std::unique_ptr<DbCheckRun> _run;
- StatusWith<BatchStats> _runBatch(const DbCheckCollectionInfo& info,
+ StatusWith<BatchStats> _runBatch(OperationContext* opCtx,
+ const DbCheckCollectionInfo& info,
const BSONKey& first,
int64_t batchDocs,
int64_t batchBytes) {
- // New OperationContext for each batch.
- auto uniqueOpCtx = Client::getCurrent()->makeOperationContext();
- auto opCtx = uniqueOpCtx.get();
-
BatchStats result;
auto timeoutMs = Milliseconds(gDbCheckCollectionTryLockTimeoutMillis.load());
const auto initialBackoffMs =
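
One detail worth noting is the stdx::unique_lock<Client> taken before setProgress_inlock(): CurOp state is read concurrently by operation-reporting code, so it may only be mutated while holding the Client lock (the _inlock suffix signals that convention). The sketch below is a standalone analogue of that producer/observer relationship, assuming a plain std::mutex in place of the Client lock; the Progress struct, batch counts, and polling interval are hypothetical.

    // Standalone analogue (assumed names, not MongoDB code): a scan thread bumps a
    // shared progress counter under a mutex while an observer thread reads it,
    // roughly how the meter installed on CurOp is read back for progress reporting.
    #include <chrono>
    #include <cstdint>
    #include <iostream>
    #include <mutex>
    #include <thread>

    struct Progress {
        std::mutex mu;           // stands in for the Client lock the patch takes
        std::uint64_t done = 0;
        std::uint64_t total = 0;
        bool active = false;
    };

    int main() {
        Progress p;
        {
            std::lock_guard<std::mutex> lk(p.mu);  // set up the meter under the lock
            p.total = 1000;                        // analogue of coll->numRecords(opCtx)
            p.active = true;
        }

        std::thread scanner([&p] {
            for (int batch = 0; batch < 4; ++batch) {
                std::this_thread::sleep_for(std::chrono::milliseconds(50));  // "hash" a batch
                std::lock_guard<std::mutex> lk(p.mu);
                p.done += 250;   // analogue of progress.get()->hit(stats.nDocs)
            }
            std::lock_guard<std::mutex> lk(p.mu);
            p.active = false;    // analogue of progress.finished()
        });

        // Observer: the moral equivalent of another thread reading CurOp state.
        while (true) {
            std::this_thread::sleep_for(std::chrono::milliseconds(60));
            std::lock_guard<std::mutex> lk(p.mu);
            std::cout << "progress: " << p.done << "/" << p.total << "\n";
            if (!p.active) break;
        }
        scanner.join();
        return 0;
    }

Bumping the counter once per completed batch rather than per document keeps the locking overhead negligible relative to the hashing work, which matches the patch's choice to call hit(stats.nDocs) after each batch instead of inside the per-document loop.
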