summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPierlauro Sciarelli <pierlauro.sciarelli@mongodb.com>2021-10-20 08:40:18 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-10-20 09:03:27 +0000
commit80362db4494daa116f21e19c4c263621b897aead (patch)
treebd68b69b2afb021a294da0b9f00c6b893e296490
parent2cc0443fb5d8d786eb59f528c5f2a15c95785e27 (diff)
downloadmongo-80362db4494daa116f21e19c4c263621b897aead.tar.gz
SERVER-59882 Autosplit tests may get one less chunk than expected due to wuow commit delay
-rw-r--r--jstests/sharding/autosplit_low_cardinality.js33
-rw-r--r--jstests/sharding/shard_existing_coll_chunk_count.js6
-rw-r--r--jstests/sharding/write_cmd_auto_split.js232
3 files changed, 165 insertions, 106 deletions
diff --git a/jstests/sharding/autosplit_low_cardinality.js b/jstests/sharding/autosplit_low_cardinality.js
index 8997c25b035..0a3a867cfcb 100644
--- a/jstests/sharding/autosplit_low_cardinality.js
+++ b/jstests/sharding/autosplit_low_cardinality.js
@@ -41,13 +41,40 @@ insertBigDocsWithKey(-10, 4);
insertBigDocsWithKey(10, 4);
waitForOngoingChunkSplits(st);
-// At least one split should have been performed
-assert.gte(numChunks(), 2, "Number of chunks is less then 2, no split have been perfomed");
+let expectedNumChunks = 2;
+try {
+ // At least one split should have been performed
+ assert.gte(numChunks(),
+ expectedNumChunks,
+ "Number of chunks is less than 2, no split have been perfomed");
+} catch (e) {
+ // (SERVER-59882) split may not have happened due to commit delay of the inserted documents
+ print("Retrying performing one insert after catching exception " + e);
+ insertBigDocsWithKey(10, 1);
+ waitForOngoingChunkSplits(st);
+ assert.gte(
+ numChunks(),
+ expectedNumChunks,
+ "Number of chunks is less than " + expectedNumChunks + ", no split has been perfomed");
+}
+
+expectedNumChunks++;
insertBigDocsWithKey(20, 4);
waitForOngoingChunkSplits(st);
// An additional split should have been performed
-assert.gte(numChunks(), 3, "Number of chunks must be at least 3");
+try {
+ assert.gte(numChunks(), expectedNumChunks, "Number of chunks must be at least 3");
+} catch (e) {
+ // (SERVER-59882) split may not have happened due to commit delay of the inserted documents
+ print("Retrying performing one insert after catching exception " + e);
+ insertBigDocsWithKey(20, 1);
+ waitForOngoingChunkSplits(st);
+ assert.gte(
+ numChunks(),
+ expectedNumChunks,
+ "Number of chunks is less than " + 3 + ", not all expected splits have been perfomed");
+}
st.stop();
})();
diff --git a/jstests/sharding/shard_existing_coll_chunk_count.js b/jstests/sharding/shard_existing_coll_chunk_count.js
index 8351f4f12ac..c53dca8fdf9 100644
--- a/jstests/sharding/shard_existing_coll_chunk_count.js
+++ b/jstests/sharding/shard_existing_coll_chunk_count.js
@@ -85,7 +85,7 @@ var runCase = function(opts) {
// Confirm number of chunks for this stage.
var numChunks = getNumberChunks(coll.getFullName());
- assert.gte(numChunks,
+ assert.lte(numChunks,
stage.expectedNumChunks,
'in ' + coll.getFullName() + ' expected ' + stage.expectedNumChunks +
' chunks for stage ' + stageNum + ', but found ' + numChunks + '\nopts: ' +
@@ -160,7 +160,7 @@ runCase({
docSize: 510 * 1024,
stages: [
{numDocsToInsert: 10, expectedNumChunks: 6},
- {numDocsToInsert: 10, expectedNumChunks: 9},
+ {numDocsToInsert: 10, expectedNumChunks: 12},
],
});
@@ -169,7 +169,7 @@ runCase({
docSize: 514 * 1024,
stages: [
{numDocsToInsert: 10, expectedNumChunks: 10},
- {numDocsToInsert: 10, expectedNumChunks: 10},
+ {numDocsToInsert: 10, expectedNumChunks: 20},
],
});
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index 8dc25082002..ed2af323f72 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -9,154 +9,186 @@ load("jstests/sharding/libs/find_chunks_util.js");
var st = new ShardingTest({shards: 1, other: {chunkSize: 1, enableAutoSplit: true}});
var configDB = st.s.getDB('config');
-assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
-assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
var doc1k = (new Array(1024)).join('x');
var testDB = st.s.getDB('test');
-jsTest.log('Test single batch insert should auto-split');
+function testSingleBatchInsertShouldAutoSplit() {
+ jsTest.log('Test single batch insert should auto-split');
-assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.insert").itcount());
+ assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
-// This should result in a little over 3MB inserted into the chunk, so with
-// a max chunk size of 1MB we'd expect the autosplitter to split this into
-// at least 3 chunks
-for (var x = 0; x < 3100; x++) {
- assert.commandWorked(testDB.runCommand(
- {insert: 'insert', documents: [{x: x, v: doc1k}], ordered: false, writeConcern: {w: 1}}));
-}
+ assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.insert").itcount());
+
+ // This should result in a little over 3MB inserted into the chunk, so with
+ // a max chunk size of 1MB we'd expect the autosplitter to split this into
+ // at least 3 chunks
+ for (var x = 0; x < 3100; x++) {
+ assert.commandWorked(testDB.runCommand({
+ insert: 'insert',
+ documents: [{x: x, v: doc1k}],
+ ordered: false,
+ writeConcern: {w: 1}
+ }));
+ }
+
+ waitForOngoingChunkSplits(st);
-waitForOngoingChunkSplits(st);
+ // Inserted batch is a multiple of the chunkSize, expect the chunks to split into
+ // more than 2.
+ assert.gt(findChunksUtil.findChunksByNs(configDB, "test.insert").itcount(), 2);
+ testDB.dropDatabase();
-// Inserted batch is a multiple of the chunkSize, expect the chunks to split into
-// more than 2.
-assert.gt(findChunksUtil.findChunksByNs(configDB, "test.insert").itcount(), 2);
-testDB.dropDatabase();
+ jsTest.log('Test single batch update should auto-split');
-jsTest.log('Test single batch update should auto-split');
+ assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
-assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
-assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
+ assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.update").itcount());
-assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.update").itcount());
+ for (var x = 0; x < 2100; x++) {
+ assert.commandWorked(testDB.runCommand({
+ update: 'update',
+ updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
+ ordered: false,
+ writeConcern: {w: 1}
+ }));
+ }
+
+ waitForOngoingChunkSplits(st);
-for (var x = 0; x < 2100; x++) {
- assert.commandWorked(testDB.runCommand({
- update: 'update',
- updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
- ordered: false,
- writeConcern: {w: 1}
- }));
+ assert.gt(findChunksUtil.findChunksByNs(configDB, "test.update").itcount(), 1);
}
-waitForOngoingChunkSplits(st);
+function testSingleDeleteShouldNotAutoSplit() {
+ jsTest.log('Test single delete should not auto-split');
-assert.gt(findChunksUtil.findChunksByNs(configDB, "test.update").itcount(), 1);
-testDB.dropDatabase();
+ assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
-jsTest.log('Test single delete should not auto-split');
+ assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.delete").itcount());
-assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
-assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
+ for (var x = 0; x < 1100; x++) {
+ assert.commandWorked(testDB.runCommand({
+ delete: 'delete',
+ deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
+ ordered: false,
+ writeConcern: {w: 1}
+ }));
+ }
-assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.delete").itcount());
+ // If we are autosplitting (which we shouldn't be), we want to wait until
+ // it's finished, otherwise we could falsely think no autosplitting was
+ // done when really it was just in progress.
+ waitForOngoingChunkSplits(st);
-for (var x = 0; x < 1100; x++) {
- assert.commandWorked(testDB.runCommand({
- delete: 'delete',
- deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
- ordered: false,
- writeConcern: {w: 1}
- }));
+ assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.delete").itcount());
}
-// If we are autosplitting (which we shouldn't be), we want to wait until
-// it's finished, otherwise we could falsely think no autosplitting was
-// done when really it was just in progress.
-waitForOngoingChunkSplits(st);
-
-assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.delete").itcount());
-testDB.dropDatabase();
+function testBatchedInsertShouldAutoSplit() {
+ jsTest.log('Test batched insert should auto-split');
-jsTest.log('Test batched insert should auto-split');
+ assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
-assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
-assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
+ assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.insert").itcount());
-assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.insert").itcount());
+ // Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
+ // we are going to be conservative.
+ for (var x = 0; x < 2100; x += 400) {
+ var docs = [];
-// Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
-// we are going to be conservative.
-for (var x = 0; x < 2100; x += 400) {
- var docs = [];
+ for (var y = 0; y < 400; y++) {
+ docs.push({x: (x + y), v: doc1k});
+ }
- for (var y = 0; y < 400; y++) {
- docs.push({x: (x + y), v: doc1k});
+ assert.commandWorked(testDB.runCommand(
+ {insert: 'insert', documents: docs, ordered: false, writeConcern: {w: 1}}));
}
- assert.commandWorked(testDB.runCommand(
- {insert: 'insert', documents: docs, ordered: false, writeConcern: {w: 1}}));
-}
+ waitForOngoingChunkSplits(st);
-waitForOngoingChunkSplits(st);
+ assert.gt(findChunksUtil.findChunksByNs(configDB, "test.insert").itcount(), 1);
+}
-assert.gt(findChunksUtil.findChunksByNs(configDB, "test.insert").itcount(), 1);
-testDB.dropDatabase();
+function testBatchedUpdateShouldAutoSplit() {
+ jsTest.log('Test batched update should auto-split');
-jsTest.log('Test batched update should auto-split');
+ assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
-assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
-assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));
+ assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.update").itcount());
-assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.update").itcount());
+ for (var x = 0; x < 2100; x += 400) {
+ var docs = [];
-for (var x = 0; x < 2100; x += 400) {
- var docs = [];
+ for (var y = 0; y < 400; y++) {
+ var id = x + y;
+ docs.push({q: {x: id}, u: {x: id, v: doc1k}, upsert: true});
+ }
- for (var y = 0; y < 400; y++) {
- var id = x + y;
- docs.push({q: {x: id}, u: {x: id, v: doc1k}, upsert: true});
+ assert.commandWorked(testDB.runCommand(
+ {update: 'update', updates: docs, ordered: false, writeConcern: {w: 1}}));
}
- assert.commandWorked(
- testDB.runCommand({update: 'update', updates: docs, ordered: false, writeConcern: {w: 1}}));
-}
+ waitForOngoingChunkSplits(st);
-waitForOngoingChunkSplits(st);
+ assert.gt(findChunksUtil.findChunksByNs(configDB, "test.update").itcount(), 1);
+}
-assert.gt(findChunksUtil.findChunksByNs(configDB, "test.update").itcount(), 1);
-testDB.dropDatabase();
+function testBatchedDeleteShouldNotAutoSplit() {
+ jsTest.log('Test batched delete should not auto-split');
-jsTest.log('Test batched delete should not auto-split');
+ assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
-assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
-assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));
+ assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.delete").itcount());
-assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.delete").itcount());
+ for (var x = 0; x < 2100; x += 400) {
+ var docs = [];
-for (var x = 0; x < 2100; x += 400) {
- var docs = [];
+ for (var y = 0; y < 400; y++) {
+ var id = x + y;
+ docs.push({q: {x: id, v: doc1k}, top: 0});
+ }
- for (var y = 0; y < 400; y++) {
- var id = x + y;
- docs.push({q: {x: id, v: doc1k}, top: 0});
+ assert.commandWorked(testDB.runCommand({
+ delete: 'delete',
+ deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
+ ordered: false,
+ writeConcern: {w: 1}
+ }));
}
- assert.commandWorked(testDB.runCommand({
- delete: 'delete',
- deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
- ordered: false,
- writeConcern: {w: 1}
- }));
-}
+ // If we are autosplitting (which we shouldn't be), we want to wait until
+ // it's finished, otherwise we could falsely think no autosplitting was
+ // done when really it was just in progress.
+ waitForOngoingChunkSplits(st);
-// If we are autosplitting (which we shouldn't be), we want to wait until
-// it's finished, otherwise we could falsely think no autosplitting was
-// done when really it was just in progress.
-waitForOngoingChunkSplits(st);
+ assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.delete").itcount());
+}
-assert.eq(1, findChunksUtil.findChunksByNs(configDB, "test.delete").itcount());
+var testCases = [
+ testSingleBatchInsertShouldAutoSplit,
+ testSingleDeleteShouldNotAutoSplit,
+ testBatchedInsertShouldAutoSplit,
+ testBatchedUpdateShouldAutoSplit,
+ testBatchedDeleteShouldNotAutoSplit
+];
+
+for (let testCase of testCases) {
+ try {
+ testDB.dropDatabase();
+ testCase();
+ } catch (e) {
+ print("Retrying test case failed due to " + e);
+ // (SERVER-59882) The split may not have happened due to write-unit-of-work commit delay
+ // Give it another best-effort try, given the low probability it would happen again
+ testDB.dropDatabase();
+ testCase();
+ }
+}
st.stop();
})();