author     Gregory Wlodarek <gregory.wlodarek@mongodb.com>    2019-10-23 19:55:42 +0000
committer  evergreen <evergreen@mongodb.com>                  2019-10-23 19:55:42 +0000
commit     afcdf8a352a1b56cd150b8f373fa8a5b7786880b (patch)
tree       01c0bb90edae5dc21f8281147b5e91eebfde78c0
parent     de524442c2ab683d31c942c6d0b3e84a6a2a3c65 (diff)
download   mongo-afcdf8a352a1b56cd150b8f373fa8a5b7786880b.tar.gz
SERVER-43815 Validate cmd should report MB of I/O per second in curOp so that users can make more informed use of throttling via the 'maxValidateMBperSec' server parameter
 src/mongo/db/catalog/SConscript               |  1
 src/mongo/db/catalog/throttle_cursor.cpp      | 33
 src/mongo/db/catalog/throttle_cursor.h        | 11
 src/mongo/db/catalog/throttle_cursor_test.cpp | 28
 src/mongo/db/catalog/validate_state.cpp       |  2
 src/mongo/db/curop.cpp                        | 11
 src/mongo/db/curop.h                          |  4
 7 files changed, 63 insertions(+), 27 deletions(-)
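
In short: DataThrottle already tracks how many bytes the validate cursors read in the current one-second window; this patch also folds those figures into CurOp so currentOp and the slow-query log can report throughput. A minimal, self-contained sketch of the accounting follows (field names mirror the patch; everything else is illustrative, not the actual MongoDB code):

    #include <cstdint>
    #include <utility>

    // Illustrative stand-in for the bookkeeping DataThrottle::awaitIfNeeded() does
    // when the one-second window rolls over (see throttle_cursor.cpp below).
    struct ThroughputWindow {
        int64_t startMillis = 0;        // start of the current one-second window
        uint64_t bytesProcessed = 0;    // bytes seen since 'startMillis'
        float totalMBProcessed = 0;     // lifetime MB, for the running average
        float totalElapsedTimeSec = 0;  // lifetime seconds, for the running average

        // Returns {MB/sec over the window just closed, lifetime average MB/sec}.
        std::pair<float, float> rollOver(int64_t nowMillis, int64_t dataSize) {
            float elapsedSec = static_cast<float>(nowMillis - startMillis) / 1000;
            float mbProcessed = static_cast<float>(bytesProcessed + dataSize) / 1024 / 1024;
            totalMBProcessed += mbProcessed;
            totalElapsedTimeSec += elapsedSec;
            startMillis = nowMillis;
            bytesProcessed = 0;
            return {mbProcessed / elapsedSec, totalMBProcessed / totalElapsedTimeSec};
        }
    };

The first value feeds OpDebug::dataThroughputLastSecond and the second feeds OpDebug::dataThroughputAverage.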
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index b9f0a093681..70cb654df94 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -378,6 +378,7 @@ env.Library(
'throttle_cursor.cpp',
],
LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/db/curop',
'$BUILD_DIR/mongo/util/fail_point',
'max_validate_mb_per_sec_idl',
],
diff --git a/src/mongo/db/catalog/throttle_cursor.cpp b/src/mongo/db/catalog/throttle_cursor.cpp
index ed83668c89f..f94306c11b9 100644
--- a/src/mongo/db/catalog/throttle_cursor.cpp
+++ b/src/mongo/db/catalog/throttle_cursor.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/catalog/throttle_cursor.h"
#include "mongo/db/catalog/max_validate_mb_per_sec_gen.h"
+#include "mongo/db/curop.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/operation_context.h"
@@ -120,21 +121,23 @@ boost::optional<KeyStringEntry> SortedDataInterfaceThrottleCursor::nextKeyString
}
void DataThrottle::awaitIfNeeded(OperationContext* opCtx, const int64_t dataSize) {
- if (_shouldNotThrottle) {
- return;
- }
-
- // No throttling should take place if 'gMaxValidateMBperSec' is zero.
- uint64_t maxValidateBytesPerSec = gMaxValidateMBperSec.loadRelaxed() * 1024 * 1024;
- if (maxValidateBytesPerSec == 0) {
- return;
- }
-
int64_t currentMillis =
opCtx->getServiceContext()->getFastClockSource()->now().toMillisSinceEpoch();
// Reset the tracked information as the second has rolled over the starting point.
if (currentMillis >= _startMillis + 1000) {
+ float elapsedTimeSec = static_cast<float>(currentMillis - _startMillis) / 1000;
+ float mbProcessed = static_cast<float>(_bytesProcessed + dataSize) / 1024 / 1024;
+
+ // Update how much data we've seen in the last second for CurOp.
+ CurOp::get(opCtx)->debug().dataThroughputLastSecond = mbProcessed / elapsedTimeSec;
+
+ _totalMBProcessed += mbProcessed;
+ _totalElapsedTimeSec += elapsedTimeSec;
+
+ // Update how much data we've seen throughout the lifetime of the DataThrottle for CurOp.
+ CurOp::get(opCtx)->debug().dataThroughputAverage = _totalMBProcessed / _totalElapsedTimeSec;
+
_startMillis = currentMillis;
_bytesProcessed = 0;
}
@@ -143,6 +146,16 @@ void DataThrottle::awaitIfNeeded(OperationContext* opCtx, const int64_t dataSize
? /*512KB*/ 1 * 1024 * 512
: dataSize;
+ if (_shouldNotThrottle) {
+ return;
+ }
+
+ // No throttling should take place if 'gMaxValidateMBperSec' is zero.
+ uint64_t maxValidateBytesPerSec = gMaxValidateMBperSec.load() * 1024 * 1024;
+ if (maxValidateBytesPerSec == 0) {
+ return;
+ }
+
if (_bytesProcessed < maxValidateBytesPerSec) {
return;
}
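
Note the reordering above: the throughput bookkeeping now runs before the early returns, so the CurOp figures stay current even when throttling is disabled or 'maxValidateMBperSec' is zero. A hedged usage sketch from a caller's point of view (the DataThrottle constructor and awaitIfNeeded signature are taken from this patch; 'nextRecordSizeBytes' is a hypothetical helper standing in for the cursor machinery):

    // Assumed caller pattern: report every read to the shared throttle, which
    // updates CurOp's throughput figures and waits once the per-second byte
    // budget implied by 'maxValidateMBperSec' is used up.
    DataThrottle dataThrottle(opCtx);
    while (int64_t bytesRead = nextRecordSizeBytes()) {
        dataThrottle.awaitIfNeeded(opCtx, bytesRead);
        // ... validate the record ...
    }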
diff --git a/src/mongo/db/catalog/throttle_cursor.h b/src/mongo/db/catalog/throttle_cursor.h
index 2240ee9b58e..97e625b146b 100644
--- a/src/mongo/db/catalog/throttle_cursor.h
+++ b/src/mongo/db/catalog/throttle_cursor.h
@@ -107,7 +107,13 @@ private:
*/
class DataThrottle {
public:
- DataThrottle() : _startMillis(0), _bytesProcessed(0), _shouldNotThrottle(false) {}
+ DataThrottle(OperationContext* opCtx)
+ : _startMillis(
+ opCtx->getServiceContext()->getFastClockSource()->now().toMillisSinceEpoch()),
+ _bytesProcessed(0),
+ _totalElapsedTimeSec(0),
+ _totalMBProcessed(0),
+ _shouldNotThrottle(false) {}
/**
* If throttling is not enabled by calling turnThrottlingOff(), or if
@@ -134,6 +140,9 @@ private:
// contain the number of bytes processed between '_startMillis' and '_startMillis + 999'.
uint64_t _bytesProcessed;
+ float _totalElapsedTimeSec;
+ float _totalMBProcessed;
+
// Whether the throttle should be active.
bool _shouldNotThrottle;
};
diff --git a/src/mongo/db/catalog/throttle_cursor_test.cpp b/src/mongo/db/catalog/throttle_cursor_test.cpp
index b1a39a65937..52b5fc853a9 100644
--- a/src/mongo/db/catalog/throttle_cursor_test.cpp
+++ b/src/mongo/db/catalog/throttle_cursor_test.cpp
@@ -47,6 +47,7 @@ namespace {
const NamespaceString kNss = NamespaceString("test.throttleCursor");
const KeyString::Value kMinKeyString = KeyString::Value();
+const uint8_t kTickDelay = 200;
class ThrottleCursorTest : public CatalogTestFixture {
private:
@@ -60,7 +61,7 @@ public:
int64_t getDifferenceInMillis(Date_t start, Date_t end);
SortedDataInterfaceThrottleCursor getIdIndex(Collection* coll);
- DataThrottle _dataThrottle;
+ std::unique_ptr<DataThrottle> _dataThrottle;
};
void ThrottleCursorTest::setUp() {
@@ -84,14 +85,11 @@ void ThrottleCursorTest::setUp() {
wuow.commit();
}
- // Set a default tick rate of 200 milliseconds.
std::unique_ptr<ClockSourceMock> clkSource =
- std::make_unique<AutoAdvancingClockSourceMock>(Milliseconds(200));
-
- // Set the initial time to be 1000 milliseconds.
- clkSource->advance(Milliseconds(999));
+ std::make_unique<AutoAdvancingClockSourceMock>(Milliseconds(kTickDelay));
operationContext()->getServiceContext()->setFastClockSource(std::move(clkSource));
+ _dataThrottle = std::make_unique<DataThrottle>(operationContext());
}
void ThrottleCursorTest::tearDown() {
@@ -115,7 +113,7 @@ SortedDataInterfaceThrottleCursor ThrottleCursorTest::getIdIndex(Collection* col
const IndexCatalogEntry* idEntry = coll->getIndexCatalog()->getEntry(idDesc);
const IndexAccessMethod* iam = idEntry->accessMethod();
- return SortedDataInterfaceThrottleCursor(operationContext(), iam, &_dataThrottle);
+ return SortedDataInterfaceThrottleCursor(operationContext(), iam, _dataThrottle.get());
}
TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOff) {
@@ -127,7 +125,7 @@ TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOff) {
FailPointEnableBlock failPoint("fixedCursorDataSizeOf512KBForDataThrottle");
SeekableRecordThrottleCursor cursor =
- SeekableRecordThrottleCursor(opCtx, coll->getRecordStore(), &_dataThrottle);
+ SeekableRecordThrottleCursor(opCtx, coll->getRecordStore(), _dataThrottle.get());
// With the data throttle off, all operations should finish within a second.
setMaxMbPerSec(0);
@@ -148,7 +146,7 @@ TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOff) {
Date_t end = getTime();
ASSERT_EQ(numRecords, 20);
- ASSERT_TRUE(getDifferenceInMillis(start, end) <= 1000);
+ ASSERT_EQ(getDifferenceInMillis(start, end), kTickDelay * numRecords + kTickDelay);
}
TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOn) {
@@ -160,7 +158,7 @@ TEST_F(ThrottleCursorTest, TestSeekableRecordThrottleCursorOn) {
FailPointEnableBlock failPoint("fixedCursorDataSizeOf512KBForDataThrottle");
SeekableRecordThrottleCursor cursor =
- SeekableRecordThrottleCursor(opCtx, coll->getRecordStore(), &_dataThrottle);
+ SeekableRecordThrottleCursor(opCtx, coll->getRecordStore(), _dataThrottle.get());
// Using a throttle with a limit of 1MB per second, all operations should take at least 5
// seconds to finish. We have 10 records, each of which is 0.5MB courtesy of the fail point, so
@@ -227,7 +225,7 @@ TEST_F(ThrottleCursorTest, TestSortedDataInterfaceThrottleCursorOff) {
Date_t end = getTime();
ASSERT_EQ(numRecords, 10);
- ASSERT_TRUE(getDifferenceInMillis(start, end) <= 1000);
+ ASSERT_EQ(getDifferenceInMillis(start, end), kTickDelay * numRecords + kTickDelay);
}
TEST_F(ThrottleCursorTest, TestSortedDataInterfaceThrottleCursorOn) {
@@ -290,13 +288,13 @@ TEST_F(ThrottleCursorTest, TestMixedCursorsWithSharedThrottleOff) {
FailPointEnableBlock failPoint("fixedCursorDataSizeOf512KBForDataThrottle");
SeekableRecordThrottleCursor recordCursor =
- SeekableRecordThrottleCursor(opCtx, coll->getRecordStore(), &_dataThrottle);
+ SeekableRecordThrottleCursor(opCtx, coll->getRecordStore(), _dataThrottle.get());
SortedDataInterfaceThrottleCursor indexCursor = getIdIndex(coll);
// With the data throttle off, all operations should finish within a second, regardless if
// the 'maxValidateMBperSec' server parameter is set.
- _dataThrottle.turnThrottlingOff();
+ _dataThrottle->turnThrottlingOff();
setMaxMbPerSec(10);
Date_t start = getTime();
@@ -320,7 +318,7 @@ TEST_F(ThrottleCursorTest, TestMixedCursorsWithSharedThrottleOff) {
Date_t end = getTime();
ASSERT_EQ(numRecords, 30);
- ASSERT_TRUE(getDifferenceInMillis(start, end) <= 1000);
+ ASSERT_EQ(getDifferenceInMillis(start, end), kTickDelay * numRecords + kTickDelay);
}
TEST_F(ThrottleCursorTest, TestMixedCursorsWithSharedThrottleOn) {
@@ -332,7 +330,7 @@ TEST_F(ThrottleCursorTest, TestMixedCursorsWithSharedThrottleOn) {
FailPointEnableBlock failPoint("fixedCursorDataSizeOf512KBForDataThrottle");
SeekableRecordThrottleCursor recordCursor =
- SeekableRecordThrottleCursor(opCtx, coll->getRecordStore(), &_dataThrottle);
+ SeekableRecordThrottleCursor(opCtx, coll->getRecordStore(), _dataThrottle.get());
SortedDataInterfaceThrottleCursor indexCursor = getIdIndex(coll);
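
The tightened timing assertions rely on AutoAdvancingClockSourceMock advancing the clock by kTickDelay on each read (as the name suggests), so the unthrottled scans finish in a fixed amount of mock time rather than merely "under a second" of wall time. For the 20-record case, kTickDelay * numRecords + kTickDelay works out to:

    static_assert(200 * 20 + 200 == 4200, "expected mock-clock milliseconds for 20 records");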
diff --git a/src/mongo/db/catalog/validate_state.cpp b/src/mongo/db/catalog/validate_state.cpp
index b16636ff0f9..f9e0b93c8ac 100644
--- a/src/mongo/db/catalog/validate_state.cpp
+++ b/src/mongo/db/catalog/validate_state.cpp
@@ -55,7 +55,7 @@ ValidateState::ValidateState(OperationContext* opCtx,
const NamespaceString& nss,
bool background,
bool fullValidate)
- : _nss(nss), _background(background), _fullValidate(fullValidate) {
+ : _nss(nss), _background(background), _fullValidate(fullValidate), _dataThrottle(opCtx) {
// Subsequent re-locks will use the UUID when 'background' is true.
if (_background) {
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index b8df781916f..feb616ded07 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -698,6 +698,14 @@ string OpDebug::report(OperationContext* opCtx, const SingleThreadedLockStats* l
s << " prepareConflictDuration: " << prepareConflictDurationMillis;
}
+ if (dataThroughputLastSecond) {
+ s << " dataThroughputLastSecond: " << *dataThroughputLastSecond << " MB/sec";
+ }
+
+ if (dataThroughputAverage) {
+ s << " dataThroughputAverage: " << *dataThroughputAverage << " MB/sec";
+ }
+
OPDEBUG_TOSTRING_HELP(nShards);
OPDEBUG_TOSTRING_HELP(cursorid);
if (mongotCursorId) {
@@ -829,6 +837,9 @@ void OpDebug::append(OperationContext* opCtx,
OPDEBUG_APPEND_ATOMIC("prepareReadConflicts", additiveMetrics.prepareReadConflicts);
OPDEBUG_APPEND_ATOMIC("writeConflicts", additiveMetrics.writeConflicts);
+ OPDEBUG_APPEND_OPTIONAL("dataThroughputLastSecond", dataThroughputLastSecond);
+ OPDEBUG_APPEND_OPTIONAL("dataThroughputAverage", dataThroughputAverage);
+
b.appendNumber("numYield", curop.numYields());
OPDEBUG_APPEND_NUMBER(nreturned);
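
With these additions, a validate operation's slow-query log line gains two fragments built exactly as in report() above (the numbers here are illustrative), and append() writes the same two values into the currentOp/profiler document:

    dataThroughputLastSecond: 7.2 MB/sec dataThroughputAverage: 6.9 MB/sec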
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index 1c1837496b2..7eff84241d6 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -229,6 +229,10 @@ public:
// Stores the duration of time spent blocked on prepare conflicts.
Milliseconds prepareConflictDurationMillis{0};
+ // Stores the amount of the data processed by the throttle cursors in MB/sec.
+ boost::optional<float> dataThroughputLastSecond;
+ boost::optional<float> dataThroughputAverage;
+
// Stores additive metrics.
AdditiveMetrics additiveMetrics;