author     Randolph Tan <randolph@10gen.com>   2018-01-29 15:00:44 -0500
committer  Randolph Tan <randolph@10gen.com>   2018-02-05 15:07:39 -0500
commit     731a5ed5e2859ecc76b1b241c086406f9e6c3867 (patch)
tree       5ed11ab38e48b1424e0268b0ce4189468ef135e0
parent     b7a8411eed8fece571b48406b0be02ed1bc9eacf (diff)
download   mongo-731a5ed5e2859ecc76b1b241c086406f9e6c3867.tar.gz
SERVER-32970 put back random chunk size initialization in mongos auto split tracking
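Without the random seed, every chunk's _dataWritten counter starts at zero after a mongos restart, so the first auto-split check for each chunk is deferred until a full chunkSize / kSplitTestFactor worth of writes has been observed again. Restoring the random initialization spreads those first checks out and avoids that post-restart splitting delay.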
-rw-r--r--  jstests/sharding/write_cmd_auto_split.js   8
-rw-r--r--  src/mongo/s/chunk.cpp                      23
-rw-r--r--  src/mongo/s/chunk.h                         2
3 files changed, 27 insertions(+), 6 deletions(-)
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index ab5ca26c84f..74155d8b0bf 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -40,7 +40,7 @@
assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
- for (var x = 0; x < 1100; x++) {
+ for (var x = 0; x < 2100; x++) {
assert.writeOK(testDB.runCommand({
update: 'update',
updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
@@ -80,7 +80,7 @@
// Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
// we are going to be conservative.
- for (var x = 0; x < 1100; x += 400) {
+ for (var x = 0; x < 2100; x += 400) {
var docs = [];
for (var y = 0; y < 400; y++) {
@@ -101,7 +101,7 @@
assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());
- for (var x = 0; x < 1100; x += 400) {
+ for (var x = 0; x < 2100; x += 400) {
var docs = [];
for (var y = 0; y < 400; y++) {
@@ -123,7 +123,7 @@
assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
- for (var x = 0; x < 1100; x += 400) {
+ for (var x = 0; x < 2100; x += 400) {
var docs = [];
for (var y = 0; y < 400; y++) {
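(All four loops above grow from 1100 to 2100 documents for the same reason: with _dataWritten seeded randomly, the point at which mongos first requests a split is no longer deterministic, so the test conservatively writes roughly twice as much ~1KB data to guarantee at least one auto split regardless of each chunk's starting offset.)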
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 3a25fa1c055..527440594eb 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -32,16 +32,37 @@
#include "mongo/s/chunk.h"
+#include "mongo/platform/random.h"
+#include "mongo/s/grid.h"
#include "mongo/util/mongoutils/str.h"
namespace mongo {
+namespace {
+
+// Test whether we should split once data * kSplitTestFactor > chunkSize (approximately)
+PseudoRandom prng(static_cast<int64_t>(time(0)));
+
+// Assume user has 64MB chunkSize setting. It is ok if this assumption is wrong since it is only
+// a heuristic.
+const int64_t kMaxDataWritten = 64 / Chunk::kSplitTestFactor;
+
+/**
+ * Generates a random value for _dataWritten so that a mongos restart wouldn't cause delay in
+ * splitting.
+ */
+int64_t mkDataWritten() {
+ return prng.nextInt64(kMaxDataWritten);
+}
+
+} // namespace
+
Chunk::Chunk(const ChunkType& from)
: _range(from.getMin(), from.getMax()),
_shardId(from.getShard()),
_lastmod(from.getVersion()),
_jumbo(from.getJumbo()),
- _dataWritten(0) {
+ _dataWritten(mkDataWritten()) {
invariantOK(from.validate());
}
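Taken together, the heuristic behaves roughly like the sketch below. This is an illustrative condensation, not the mongos code: ChunkSketch, shouldCheckSplit, and the reset-to-zero after each check are stand-ins, under the same 64MB chunk-size assumption the comment above makes (spelled out in bytes here).

    #include <cstdint>
    #include <random>

    constexpr uint64_t kSplitTestFactor = 5;                   // from chunk.h
    constexpr uint64_t kChunkSizeBytes = 64ULL * 1024 * 1024;  // assumed 64MB default

    // Random starting offset, mirroring mkDataWritten() above.
    uint64_t randomDataWritten() {
        static std::mt19937_64 prng{std::random_device{}()};
        return prng() % (kChunkSizeBytes / kSplitTestFactor);
    }

    struct ChunkSketch {
        uint64_t dataWritten = randomDataWritten();

        // Called for every write routed to this chunk. Returns true roughly
        // once per kChunkSizeBytes / kSplitTestFactor bytes written, at which
        // point the router would ask the owning shard to look for split points.
        bool shouldCheckSplit(uint64_t bytesWritten) {
            dataWritten += bytesWritten;
            if (dataWritten * kSplitTestFactor > kChunkSizeBytes) {
                dataWritten = 0;  // hypothetical reset after each check
                return true;
            }
            return false;
        }
    };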
diff --git a/src/mongo/s/chunk.h b/src/mongo/s/chunk.h
index 867bdce54fc..540238463b5 100644
--- a/src/mongo/s/chunk.h
+++ b/src/mongo/s/chunk.h
@@ -42,7 +42,7 @@ class BSONObj;
class Chunk {
public:
// Test whether we should split once data * kSplitTestFactor > chunkSize (approximately)
- const uint64_t kSplitTestFactor = 5;
+ static const uint64_t kSplitTestFactor = 5;
explicit Chunk(const ChunkType& from);
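The switch to static above is what makes chunk.cpp's file-scope constant legal: kMaxDataWritten reads Chunk::kSplitTestFactor at namespace scope, and only a static member can be accessed without an instance (a non-static const member would also add eight bytes to every Chunk object). A minimal illustration with toy types, not the mongo classes:

    #include <cstdint>

    struct PerInstance {
        const uint64_t factor = 5;  // each object carries its own copy
    };

    struct ClassWide {
        static const uint64_t factor = 5;  // one constant for the whole class
    };

    // const uint64_t bad = PerInstance::factor;  // error: needs an object
    const uint64_t ok = ClassWide::factor;        // fine at namespace scope,
                                                  // like chunk.cpp's kMaxDataWritten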