author    Cheahuychou Mao <mao.cheahuychou@gmail.com>    2023-04-21 19:04:45 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>    2023-04-24 02:33:42 +0000
commit    2377a8d91c5f70e059390e996c6725ec2c4d21a3 (patch)
tree      870e084315c458cc2584b05518db1d08a37c283a
parent    4c86acfbb01abe1eb694a5a861ce78dcba3d79ab (diff)
download  mongo-2377a8d91c5f70e059390e996c6725ec2c4d21a3.tar.gz
SERVER-76297 Make query sampling jstests check namespace and collection uuid when waiting for sampling to become active and inactive
(cherry picked from commit afc8fc41d98174bd71c1ecc0d15df9bae3605d7e)
-rw-r--r--  jstests/multiVersion/targetedTestsLastLtsFeatures/query_sampling_after_downgrade.js    5
-rw-r--r--  jstests/sharding/analyze_shard_key/deprioritize_query_sampling_inserts.js               9
-rw-r--r--  jstests/sharding/analyze_shard_key/libs/query_sampling_util.js                        149
-rw-r--r--  jstests/sharding/analyze_shard_key/list_sampled_queries.js                             34
-rw-r--r--  jstests/sharding/analyze_shard_key/read_and_write_distribution.js                      31
-rw-r--r--  jstests/sharding/analyze_shard_key/sample_nested_agg_queries_sharded.js                 4
-rw-r--r--  jstests/sharding/analyze_shard_key/sample_nested_agg_queries_unsharded.js               4
-rw-r--r--  jstests/sharding/analyze_shard_key/sample_read_queries_sharded.js                      13
-rw-r--r--  jstests/sharding/analyze_shard_key/sample_read_queries_unsharded.js                    13
-rw-r--r--  jstests/sharding/analyze_shard_key/sample_write_queries_sharded.js                     13
-rw-r--r--  jstests/sharding/analyze_shard_key/sample_write_queries_unsharded.js                   13
-rw-r--r--  jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_rs.js          5
-rw-r--r--  jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_sharded.js     9
-rw-r--r--  jstests/sharding/analyze_shard_key/shard_key_updates.js                                10
-rw-r--r--  jstests/sharding/analyze_shard_key/ttl_delete_samples.js                               24
15 files changed, 233 insertions, 103 deletions
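
In short, the tests no longer wait on per-node serverStatus counters; they pass the target namespace and collection UUID to cluster-aware helpers. A rough before/after sketch of a typical call site, with names taken from the diffs below (the surrounding test setup — st, testDb, collName, ns, sampleRate — is assumed):

    // Before: wait on each mongos/shard node individually, keyed only on the
    // number of active sampling collections reported by serverStatus.
    // QuerySamplingUtil.waitForActiveSampling(st.s0);
    // QuerySamplingUtil.waitForActiveSampling(st.s1);
    // QuerySamplingUtil.waitForActiveSampling(shard0Primary);

    // After: wait once on the whole fixture for a specific namespace and UUID.
    const collUuid = QuerySamplingUtil.getCollectionUuid(testDb, collName);
    assert.commandWorked(
        st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate}));
    QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collUuid);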
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/query_sampling_after_downgrade.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/query_sampling_after_downgrade.js
index bedb4f91198..975be0b189b 100644
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/query_sampling_after_downgrade.js
+++ b/jstests/multiVersion/targetedTestsLastLtsFeatures/query_sampling_after_downgrade.js
@@ -39,11 +39,10 @@ const testDb = st.s.getDB(dbName);
const testColl = testDb.getCollection(collName);
assert.commandWorked(testColl.insert([{x: 1}]));
+const collUuid = QuerySamplingUtil.getCollectionUuid(testDb, collName);
assert.commandWorked(st.s0.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate}));
-QuerySamplingUtil.waitForActiveSampling(st.s0);
-QuerySamplingUtil.waitForActiveSampling(st.s1);
-QuerySamplingUtil.waitForActiveSampling(shard0Primary);
+QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collUuid);
assert.commandWorked(shard0Primary.adminCommand(
{configureFailPoint: "disableQueryAnalysisWriterFlusher", mode: "alwaysOn"}));
diff --git a/jstests/sharding/analyze_shard_key/deprioritize_query_sampling_inserts.js b/jstests/sharding/analyze_shard_key/deprioritize_query_sampling_inserts.js
index c312636572c..be1fa109253 100644
--- a/jstests/sharding/analyze_shard_key/deprioritize_query_sampling_inserts.js
+++ b/jstests/sharding/analyze_shard_key/deprioritize_query_sampling_inserts.js
@@ -21,7 +21,7 @@ const mongosSetParameterOpts = {
queryAnalysisSamplerConfigurationRefreshSecs,
};
-function runTest(conn, primary) {
+function runTest(conn, primary, {st, rst}) {
const dbName = "testDb";
const collName = "testColl";
const ns = dbName + "." + collName;
@@ -33,9 +33,10 @@ function runTest(conn, primary) {
const sampleDiffNs = "config." + sampleDiffCollName;
assert.commandWorked(testColl.insert([{x: 1}]));
+ const collUuid = QuerySamplingUtil.getCollectionUuid(testDb, collName);
assert.commandWorked(conn.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate}));
- QuerySamplingUtil.waitForActiveSampling(conn);
+ QuerySamplingUtil.waitForActiveSampling(ns, collUuid, {st, rst});
// Test insert to config.sampledQueries.
const fp1 = configureFailPoint(primary, "hangInsertBeforeWrite", {ns: sampleNs});
@@ -80,7 +81,7 @@ function runTest(conn, primary) {
});
jsTest.log("Testing deprioritized insert in sharded cluster.");
- runTest(st.s, st.rs0.getPrimary());
+ runTest(st.s, st.rs0.getPrimary(), {st});
st.stop();
}
@@ -90,7 +91,7 @@ function runTest(conn, primary) {
rst.initiate();
jsTest.log("Testing deprioritized insert in replica set.");
- runTest(rst.getPrimary(), rst.getPrimary());
+ runTest(rst.getPrimary(), rst.getPrimary(), {rst});
rst.stopSet();
}
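
The test above uses the new generic entry point, which dispatches on whichever fixture is passed in. A minimal sketch of how the two fixture types call it (ns and collUuid are assumed to come from getCollectionUuid as in the diff):

    // Sharded cluster: waits on every mongos and every shardsvr mongod node.
    QuerySamplingUtil.waitForActiveSampling(ns, collUuid, {st});

    // Replica set: waits on every node in the set.
    QuerySamplingUtil.waitForActiveSampling(ns, collUuid, {rst});

    // Exactly one of 'st' or 'rst' must be provided; passing both or neither
    // trips the asserts inside the helper.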
diff --git a/jstests/sharding/analyze_shard_key/libs/query_sampling_util.js b/jstests/sharding/analyze_shard_key/libs/query_sampling_util.js
index 51c4011d28f..77319519909 100644
--- a/jstests/sharding/analyze_shard_key/libs/query_sampling_util.js
+++ b/jstests/sharding/analyze_shard_key/libs/query_sampling_util.js
@@ -27,15 +27,76 @@ var QuerySamplingUtil = (function() {
}
/**
- * Waits for the given node to have at least one active collection for query sampling. If
- * 'waitForTokens' is true, additionally waits for the sampling bucket to contain at least one
- * second of tokens.
+ * Returns the query sampling current op documents that match the given filter.
*/
- function waitForActiveSampling(node, waitForTokens = true) {
+ function getQuerySamplingCurrentOp(conn, filter) {
+ return conn.getDB("admin")
+ .aggregate([
+ {$currentOp: {allUsers: true, localOps: true}},
+ {$match: Object.assign({desc: "query analyzer"}, filter)},
+ ])
+ .toArray();
+ }
+
+ /**
+ * Waits for the query sampling for the collection with the namespace and collection uuid
+ * to be active on the given node.
+ */
+ function waitForActiveSamplingOnNode(node, ns, collUuid) {
+ jsTest.log("Start waiting for active sampling " + tojson({node, ns, collUuid}));
+ let numTries = 0;
assert.soon(() => {
- const res = assert.commandWorked(node.adminCommand({serverStatus: 1}));
- assert(res.hasOwnProperty("queryAnalyzers"));
- return res.queryAnalyzers.activeCollections >= 1;
+ numTries++;
+
+ const docs = getQuerySamplingCurrentOp(node, {ns, collUuid});
+ if (docs.length == 1) {
+ return true;
+ }
+ assert.eq(docs.length, 0, docs);
+
+ if (numTries % 100 == 0) {
+ jsTest.log("Still waiting for active sampling " +
+ tojson({node, ns, collUuid, docs}));
+ }
+ return false;
+ });
+ jsTest.log("Finished waiting for active sampling " + tojson({node, ns, collUuid}));
+ }
+
+ /**
+ * Waits for the query sampling for the collection with the namespace and collection uuid
+ * to be inactive on the given node.
+ */
+ function waitForInactiveSamplingOnNode(node, ns, collUuid) {
+ jsTest.log("Start waiting for inactive sampling " + tojson({node, ns, collUuid}));
+ let numTries = 0;
+ assert.soon(() => {
+ numTries++;
+
+ const docs = getQuerySamplingCurrentOp(node, {ns, collUuid});
+ if (docs.length == 0) {
+ return true;
+ }
+ assert.eq(docs.length, 1, docs);
+
+ if (numTries % 100 == 0) {
+ jsTest.log("Still waiting for inactive sampling " +
+ tojson({node, ns, collUuid, docs}));
+ }
+ return false;
+ });
+ jsTest.log("Finished waiting for inactive sampling " + tojson({node, ns, collUuid}));
+ }
+
+ /**
+ * Waits for the query sampling for the collection with the namespace and collection uuid
+ * to be active on all nodes in the given replica set. If 'waitForTokens' is true, additionally
+ * waits for the sampling bucket to contain at least one second of tokens.
+ */
+ function waitForActiveSamplingReplicaSet(rst, ns, collUuid, waitForTokens = true) {
+ rst.nodes.forEach(node => {
+ // Skip waiting for tokens now and just wait once at the end if needed.
+ waitForActiveSamplingOnNode(node, ns, collUuid, false /* waitForTokens */);
});
if (waitForTokens) {
// Wait for the bucket to contain at least one second of tokens.
@@ -44,38 +105,72 @@ var QuerySamplingUtil = (function() {
}
/**
- * Waits for the given node to have no active collections for query sampling.
+ * Waits for the query sampling for the collection with the namespace and collection uuid
+ * to be inactive on all nodes in the given replica set.
*/
- function waitForInactiveSampling(node) {
- assert.soon(() => {
- const res = assert.commandWorked(node.adminCommand({serverStatus: 1}));
- return res.queryAnalyzers.activeCollections == 0;
+ function waitForInactiveSamplingReplicaSet(rst, ns, collUuid) {
+ rst.nodes.forEach(node => {
+ waitForInactiveSamplingOnNode(node, ns, collUuid);
});
}
/**
- * Waits for all shard nodes to have one active collection for query sampling.
+ * Waits for the query sampling for the collection with the namespace and collection uuid
+ * to be active on all mongos and shardsvr mongod nodes in the given sharded cluster.
*/
- function waitForActiveSamplingOnAllShards(st) {
- st._rs.forEach(rs => {
- rs.nodes.forEach(node => {
- // Skip waiting for tokens now and just wait once at the end.
- waitForActiveSampling(node, false /* waitForTokens */);
+ function waitForActiveSamplingShardedCluster(st, ns, collUuid, {skipMongoses} = {}) {
+ if (!skipMongoses) {
+ st.forEachMongos(mongos => {
+ waitForActiveSamplingOnNode(mongos, ns, collUuid);
});
+ }
+ st._rs.forEach(rst => {
+ // Skip waiting for tokens now and just wait once at the end if needed.
+ waitForActiveSamplingReplicaSet(rst, ns, collUuid, false /* waitForTokens */);
});
// Wait for the bucket to contain at least one second of tokens.
sleep(1000);
}
/**
- * Waits for all shard nodes to have no active collection for query sampling.
+ * Waits for the query sampling for the collection with the namespace and collection uuid
+ * to be inactive on all mongos and shardsvr mongod nodes in the given sharded cluster.
*/
- function waitForInactiveSamplingOnAllShards(st) {
- st._rs.forEach(rs => {
- rs.nodes.forEach(node => {
- waitForInactiveSampling(node);
- });
+ function waitForInactiveSamplingShardedCluster(st, ns, collUuid) {
+ st.forEachMongos(mongos => {
+ waitForInactiveSamplingOnNode(mongos, ns, collUuid);
});
+ st._rs.forEach(rst => {
+ waitForInactiveSamplingReplicaSet(rst, ns, collUuid);
+ });
+ }
+
+ /**
+ * Waits for the query sampling for the collection with the namespace and collection uuid
+ * to be active on all nodes in the given replica set or sharded cluster.
+ */
+ function waitForActiveSampling(ns, collUuid, {rst, st}) {
+ assert(rst || st);
+ assert(!rst || !st);
+ if (st) {
+ waitForActiveSamplingShardedCluster(st, ns, collUuid);
+ } else {
+ waitForActiveSamplingReplicaSet(rst, ns, collUuid);
+ }
+ }
+
+ /**
+ * Waits for the query sampling for the collection with the namespace and collection uuid
+ * to be inactive on all nodes in the given replica set or sharded cluster.
+ */
+ function waitForInactiveSampling(ns, collUuid, {rst, st}) {
+ assert(rst || st);
+ assert(!rst || !st);
+ if (st) {
+ waitForInactiveSamplingShardedCluster(st, ns, collUuid);
+ } else {
+ waitForInactiveSamplingReplicaSet(rst, ns, collUuid);
+ }
}
/**
@@ -327,10 +422,12 @@ var QuerySamplingUtil = (function() {
generateRandomString,
generateRandomCollation,
makeCmdObjIgnoreSessionInfo,
+ waitForActiveSamplingReplicaSet,
+ waitForInactiveSamplingReplicaSet,
+ waitForActiveSamplingShardedCluster,
+ waitForInactiveSamplingShardedCluster,
waitForActiveSampling,
waitForInactiveSampling,
- waitForActiveSamplingOnAllShards,
- waitForInactiveSamplingOnAllShards,
assertSubObject,
assertSoonSampledQueryDocuments,
assertSoonSampledQueryDocumentsAcrossShards,
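
The helpers above determine the active/inactive state from the query analyzer's $currentOp entries rather than the serverStatus activeCollections counter. A standalone shell sketch of that underlying check, assuming 'node' is a connection to a mongos or shardsvr mongod and 'ns'/'collUuid' identify the collection being sampled:

    const docs = node.getDB("admin")
                     .aggregate([
                         {$currentOp: {allUsers: true, localOps: true}},
                         {$match: {desc: "query analyzer", ns: ns, collUuid: collUuid}},
                     ])
                     .toArray();
    // One matching document means sampling is active for that collection on this
    // node; zero documents means it is inactive.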
diff --git a/jstests/sharding/analyze_shard_key/list_sampled_queries.js b/jstests/sharding/analyze_shard_key/list_sampled_queries.js
index 0d4bc7e0cdd..39786d7742c 100644
--- a/jstests/sharding/analyze_shard_key/list_sampled_queries.js
+++ b/jstests/sharding/analyze_shard_key/list_sampled_queries.js
@@ -13,13 +13,16 @@ load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js");
load("jstests/sharding/analyze_shard_key/libs/sampling_current_op_and_server_status_common.js");
const sampleRate = 10000;
+
+const queryAnalysisSamplerConfigurationRefreshSecs = 1;
const queryAnalysisWriterIntervalSecs = 1;
const mongodSetParameterOpts = {
+ queryAnalysisSamplerConfigurationRefreshSecs,
queryAnalysisWriterIntervalSecs,
};
const mongosSetParameterOpts = {
- queryAnalysisSamplerConfigurationRefreshSecs: 1,
+ queryAnalysisSamplerConfigurationRefreshSecs,
};
function insertDocuments(collection, numDocs) {
@@ -30,23 +33,21 @@ function insertDocuments(collection, numDocs) {
assert.commandWorked(bulk.execute());
}
-function runTest(conn, st) {
- const dbName = "test";
- const collName0 = "coll0";
- const collName1 = "coll1";
+function runTest(conn, {rst, st}) {
+ assert(rst || st);
+ assert(!rst || !st);
+
+ const dbName = "testDb";
+ const collName0 = "testColl0";
+ const collName1 = "testColl1";
const ns0 = dbName + "." + collName0;
const ns1 = dbName + "." + collName1;
const numDocs = 100;
const adminDb = conn.getDB("admin");
- const configDb = conn.getDB("config");
const testDb = conn.getDB(dbName);
const collection0 = testDb.getCollection(collName0);
const collection1 = testDb.getCollection(collName1);
- insertDocuments(collection0, numDocs);
- insertDocuments(collection1, numDocs);
- const collUuid0 = QuerySamplingUtil.getCollectionUuid(testDb, collName0);
- const collUuid1 = QuerySamplingUtil.getCollectionUuid(testDb, collName1);
if (st) {
// Shard collection1 and move one chunk to shard1.
@@ -60,9 +61,15 @@ function runTest(conn, st) {
conn.adminCommand({moveChunk: ns1, find: {x: 0}, to: st.shard1.shardName}));
}
+ insertDocuments(collection0, numDocs);
+ insertDocuments(collection1, numDocs);
+ const collUuid0 = QuerySamplingUtil.getCollectionUuid(testDb, collName0);
+ const collUuid1 = QuerySamplingUtil.getCollectionUuid(testDb, collName1);
+
conn.adminCommand({configureQueryAnalyzer: ns0, mode: "full", sampleRate});
conn.adminCommand({configureQueryAnalyzer: ns1, mode: "full", sampleRate});
- QuerySamplingUtil.waitForActiveSampling(conn, true);
+ QuerySamplingUtil.waitForActiveSampling(ns0, collUuid0, {rst, st});
+ QuerySamplingUtil.waitForActiveSampling(ns1, collUuid1, {rst, st});
// Create read samples on collection0.
let expectedSamples = [];
@@ -169,7 +176,7 @@ function runTest(conn, st) {
mongosOptions: {setParameter: mongosSetParameterOpts}
});
- runTest(st.s, st);
+ runTest(st.s, {st});
st.stop();
}
@@ -178,8 +185,9 @@ function runTest(conn, st) {
const rst = new ReplSetTest({nodes: 2, nodeOptions: {setParameter: mongodSetParameterOpts}});
rst.startSet();
rst.initiate();
+ const primary = rst.getPrimary();
- runTest(rst.getPrimary());
+ runTest(primary, {rst});
rst.stopSet();
}
diff --git a/jstests/sharding/analyze_shard_key/read_and_write_distribution.js b/jstests/sharding/analyze_shard_key/read_and_write_distribution.js
index 4e700bd666b..f20abaa7bac 100644
--- a/jstests/sharding/analyze_shard_key/read_and_write_distribution.js
+++ b/jstests/sharding/analyze_shard_key/read_and_write_distribution.js
@@ -481,6 +481,8 @@ function runTest(fixture, {isShardedColl, shardKeyField, isHashed}) {
docs.push({_id: i, x: i, y: i, ts: new Date()});
}
assert.commandWorked(sampledColl.insert(docs));
+ const sampledCollUuid =
+ QuerySamplingUtil.getCollectionUuid(fixture.conn.getDB(dbName), sampledCollName);
// Verify that the analyzeShardKey command returns zeros for the read and write sample size
// when there are no sampled queries.
@@ -491,7 +493,7 @@ function runTest(fixture, {isShardedColl, shardKeyField, isHashed}) {
// Turn on query sampling and wait for sampling to become active.
assert.commandWorked(
fixture.conn.adminCommand({configureQueryAnalyzer: sampledNs, mode: "full", sampleRate}));
- fixture.waitForActiveSamplingFn();
+ fixture.waitForActiveSamplingFn(sampledNs, sampledCollUuid);
// Create and run test queries.
const testCase = makeTestCase(
@@ -504,7 +506,7 @@ function runTest(fixture, {isShardedColl, shardKeyField, isHashed}) {
// getting sampled.
assert.commandWorked(
fixture.conn.adminCommand({configureQueryAnalyzer: sampledNs, mode: "off"}));
- fixture.waitForInactiveSamplingFn();
+ fixture.waitForInactiveSamplingFn(sampledNs, sampledCollUuid);
res = waitForSampledQueries(fixture.conn, sampledNs, shardKey, testCase);
// Verify that the metrics are as expected.
@@ -585,10 +587,8 @@ const mongosSetParametersOpts = {
st.s0.adminCommand({moveChunk: ns, find: {x: 1000}, to: st.shard2.shardName}));
}
},
- waitForActiveSamplingFn: () => {
- for (let i = 0; i < numMongoses; i++) {
- QuerySamplingUtil.waitForActiveSampling(st["s" + String(i)]);
- }
+ waitForActiveSamplingFn: (ns, collUuid) => {
+ QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collUuid);
},
runCmdsFn: (dbName, cmdObjs) => {
for (let i = 0; i < cmdObjs.length; i++) {
@@ -596,11 +596,8 @@ const mongosSetParametersOpts = {
assert.commandWorked(db.runCommand(cmdObjs[i]));
}
},
- waitForInactiveSamplingFn: () => {
- for (let i = 0; i < numMongoses; i++) {
- QuerySamplingUtil.waitForInactiveSampling(st["s" + String(i)]);
- }
- QuerySamplingUtil.waitForInactiveSamplingOnAllShards(st);
+ waitForInactiveSamplingFn: (ns, collUuid) => {
+ QuerySamplingUtil.waitForInactiveSamplingShardedCluster(st, ns, collUuid);
}
};
@@ -642,10 +639,8 @@ const mongosSetParametersOpts = {
setUpCollectionFn: (dbName, collName, isShardedColl) => {
// No setup is needed.
},
- waitForActiveSamplingFn: () => {
- rst.nodes.forEach(node => {
- QuerySamplingUtil.waitForActiveSampling(node);
- });
+ waitForActiveSamplingFn: (ns, collUuid) => {
+ QuerySamplingUtil.waitForActiveSamplingReplicaSet(rst, ns, collUuid);
},
runCmdsFn: (dbName, cmdObjs) => {
for (let i = 0; i < cmdObjs.length; i++) {
@@ -653,10 +648,8 @@ const mongosSetParametersOpts = {
assert.commandWorked(node.getDB(dbName).runCommand(cmdObjs[i]));
}
},
- waitForInactiveSamplingFn: () => {
- rst.nodes.forEach(node => {
- QuerySamplingUtil.waitForInactiveSampling(node);
- });
+ waitForInactiveSamplingFn: (ns, collUuid) => {
+ QuerySamplingUtil.waitForInactiveSamplingReplicaSet(rst, ns, collUuid);
}
};
diff --git a/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_sharded.js b/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_sharded.js
index 2ac5c33c2ca..74778a977de 100644
--- a/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_sharded.js
+++ b/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_sharded.js
@@ -52,7 +52,9 @@ assert.commandWorked(
assert.commandWorked(
st.s.adminCommand({configureQueryAnalyzer: foreignNs, mode: "full", sampleRate: 1000}));
-QuerySamplingUtil.waitForActiveSamplingOnAllShards(st);
+const foreignCollUUid = QuerySamplingUtil.getCollectionUuid(mongosDB, foreignCollName);
+QuerySamplingUtil.waitForActiveSamplingShardedCluster(
+ st, foreignNs, foreignCollUUid, {skipMongoses: true});
for (let {name,
makeOuterPipelineFunc,
diff --git a/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_unsharded.js b/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_unsharded.js
index 38c79496908..52506ab8391 100644
--- a/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_unsharded.js
+++ b/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_unsharded.js
@@ -43,7 +43,9 @@ assert.commandWorked(mongosDB.createCollection(foreignCollName));
assert.commandWorked(
st.s.adminCommand({configureQueryAnalyzer: foreignNs, mode: "full", sampleRate: 1000}));
-QuerySamplingUtil.waitForActiveSamplingOnAllShards(st);
+const foreignCollUUid = QuerySamplingUtil.getCollectionUuid(mongosDB, foreignCollName);
+QuerySamplingUtil.waitForActiveSamplingShardedCluster(
+ st, foreignNs, foreignCollUUid, {skipMongoses: true});
// The foreign collection is unsharded so all documents are on the primary shard.
const shardNames = [st.rs0.name];
diff --git a/jstests/sharding/analyze_shard_key/sample_read_queries_sharded.js b/jstests/sharding/analyze_shard_key/sample_read_queries_sharded.js
index 1cbd72406a9..ba7c2f6f3fa 100644
--- a/jstests/sharding/analyze_shard_key/sample_read_queries_sharded.js
+++ b/jstests/sharding/analyze_shard_key/sample_read_queries_sharded.js
@@ -10,14 +10,19 @@ load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js");
// Make the periodic jobs for refreshing sample rates and writing sampled queries and diffs have a
// period of 1 second to speed up the test.
+const queryAnalysisWriterIntervalSecs = 1;
+const queryAnalysisSamplerConfigurationRefreshSecs = 1;
const st = new ShardingTest({
shards: 3,
rs: {
nodes: 2,
- setParameter:
- {queryAnalysisWriterIntervalSecs: 1, logComponentVerbosity: tojson({sharding: 2})}
+ setParameter: {
+ queryAnalysisSamplerConfigurationRefreshSecs,
+ queryAnalysisWriterIntervalSecs,
+ logComponentVerbosity: tojson({sharding: 2})
+ }
},
- mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs: 1}}
+ mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs}}
});
const dbName = "testDb";
@@ -41,7 +46,7 @@ const collectionUuid = QuerySamplingUtil.getCollectionUuid(mongosDB, collName);
assert.commandWorked(
st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 1000}));
-QuerySamplingUtil.waitForActiveSampling(st.s);
+QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collectionUuid);
const expectedSampledQueryDocs = [];
diff --git a/jstests/sharding/analyze_shard_key/sample_read_queries_unsharded.js b/jstests/sharding/analyze_shard_key/sample_read_queries_unsharded.js
index e21fcb96f04..0e2f910a846 100644
--- a/jstests/sharding/analyze_shard_key/sample_read_queries_unsharded.js
+++ b/jstests/sharding/analyze_shard_key/sample_read_queries_unsharded.js
@@ -11,14 +11,19 @@ load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js");
// Make the periodic jobs for refreshing sample rates and writing sampled queries and diffs have a
// period of 1 second to speed up the test.
+const queryAnalysisWriterIntervalSecs = 1;
+const queryAnalysisSamplerConfigurationRefreshSecs = 1;
const st = new ShardingTest({
shards: 2,
rs: {
nodes: 2,
- setParameter:
- {queryAnalysisWriterIntervalSecs: 1, logComponentVerbosity: tojson({sharding: 2})}
+ setParameter: {
+ queryAnalysisSamplerConfigurationRefreshSecs,
+ queryAnalysisWriterIntervalSecs,
+ logComponentVerbosity: tojson({sharding: 2})
+ }
},
- mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs: 1}}
+ mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs}}
});
const dbName = "testDb";
@@ -34,7 +39,7 @@ const collectionUuid = QuerySamplingUtil.getCollectionUuid(mongosDB, collName);
assert.commandWorked(
st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 1000}));
-QuerySamplingUtil.waitForActiveSampling(st.s);
+QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collectionUuid);
const expectedSampledQueryDocs = [];
// This is an unsharded collection so all documents are on the primary shard.
diff --git a/jstests/sharding/analyze_shard_key/sample_write_queries_sharded.js b/jstests/sharding/analyze_shard_key/sample_write_queries_sharded.js
index e3f10c1190a..2d8cbc55715 100644
--- a/jstests/sharding/analyze_shard_key/sample_write_queries_sharded.js
+++ b/jstests/sharding/analyze_shard_key/sample_write_queries_sharded.js
@@ -10,14 +10,19 @@ load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js");
// Make the periodic jobs for refreshing sample rates and writing sampled queries and diffs have a
// period of 1 second to speed up the test.
+const queryAnalysisWriterIntervalSecs = 1;
+const queryAnalysisSamplerConfigurationRefreshSecs = 1;
const st = new ShardingTest({
shards: 3,
rs: {
nodes: 2,
- setParameter:
- {queryAnalysisWriterIntervalSecs: 1, logComponentVerbosity: tojson({sharding: 2})}
+ setParameter: {
+ queryAnalysisSamplerConfigurationRefreshSecs,
+ queryAnalysisWriterIntervalSecs,
+ logComponentVerbosity: tojson({sharding: 2})
+ }
},
- mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs: 1}}
+ mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs}}
});
const dbName = "testDb";
@@ -43,7 +48,7 @@ const collectionUuid = QuerySamplingUtil.getCollectionUuid(mongosDB, collName);
assert.commandWorked(
st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 1000}));
-QuerySamplingUtil.waitForActiveSampling(st.s);
+QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collectionUuid);
const expectedSampledQueryDocs = [];
diff --git a/jstests/sharding/analyze_shard_key/sample_write_queries_unsharded.js b/jstests/sharding/analyze_shard_key/sample_write_queries_unsharded.js
index 9f28c130f30..e921526366a 100644
--- a/jstests/sharding/analyze_shard_key/sample_write_queries_unsharded.js
+++ b/jstests/sharding/analyze_shard_key/sample_write_queries_unsharded.js
@@ -11,14 +11,19 @@ load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js");
// Make the periodic jobs for refreshing sample rates and writing sampled queries and diffs have a
// period of 1 second to speed up the test.
+const queryAnalysisWriterIntervalSecs = 1;
+const queryAnalysisSamplerConfigurationRefreshSecs = 1;
const st = new ShardingTest({
shards: 2,
rs: {
nodes: 2,
- setParameter:
- {queryAnalysisWriterIntervalSecs: 1, logComponentVerbosity: tojson({sharding: 2})}
+ setParameter: {
+ queryAnalysisSamplerConfigurationRefreshSecs,
+ queryAnalysisWriterIntervalSecs,
+ logComponentVerbosity: tojson({sharding: 2})
+ }
},
- mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs: 1}}
+ mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs}}
});
const dbName = "testDb";
@@ -34,7 +39,7 @@ const collectionUuid = QuerySamplingUtil.getCollectionUuid(mongosDB, collName);
assert.commandWorked(
st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 1000}));
-QuerySamplingUtil.waitForActiveSampling(st.s);
+QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collectionUuid);
const expectedSampledQueryDocs = [];
// This is an unsharded collection so all documents are on the primary shard.
diff --git a/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_rs.js b/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_rs.js
index ae28250cbd4..f4e91a0f155 100644
--- a/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_rs.js
+++ b/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_rs.js
@@ -42,6 +42,7 @@ for (let i = 0; i < numDocs; i++) {
bulk.insert({x: i, y: i});
}
assert.commandWorked(bulk.execute());
+const collUuid = QuerySamplingUtil.getCollectionUuid(db, collName);
function runCommandAndAssertCurrentOpAndServerStatus(opKind, cmdObj, oldState) {
assert.commandWorked(primary.getDB(dbName).runCommand(cmdObj));
@@ -62,7 +63,7 @@ assert.eq(
// Start query sampling.
assert.commandWorked(
primary.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: sampleRate}));
-QuerySamplingUtil.waitForActiveSampling(primary);
+QuerySamplingUtil.waitForActiveSamplingReplicaSet(rst, ns, collUuid);
// Execute different kinds of queries and check counters.
const cmdObj0 = {
@@ -98,7 +99,7 @@ const state4 = runCommandAndAssertCurrentOpAndServerStatus(opKindWrite, cmdObj4,
// Stop query sampling.
assert.commandWorked(primary.adminCommand({configureQueryAnalyzer: ns, mode: "off"}));
-QuerySamplingUtil.waitForInactiveSampling(primary);
+QuerySamplingUtil.waitForInactiveSamplingReplicaSet(rst, ns, collUuid);
const expectedFinalState = Object.assign({}, state4, true /* deep */);
expectedFinalState.currentOp = [];
diff --git a/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_sharded.js b/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_sharded.js
index d1c0553c96d..ac8f3b3daa4 100644
--- a/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_sharded.js
+++ b/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_sharded.js
@@ -52,6 +52,7 @@ for (let i = 0; i < numDocs; i++) {
bulk.insert({x: i, y: i});
}
assert.commandWorked(bulk.execute());
+const collUuid = QuerySamplingUtil.getCollectionUuid(db, collName);
function makeInitialCurrentOpAndServerStatus(numColls) {
return {
@@ -92,9 +93,7 @@ assert.eq(bsonWoCompare(currentState, makeInitialCurrentOpAndServerStatus(0)), 0
// Start query sampling.
assert.commandWorked(
st.s0.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: sampleRate}));
-QuerySamplingUtil.waitForActiveSampling(st.s0);
-QuerySamplingUtil.waitForActiveSampling(st.s1);
-QuerySamplingUtil.waitForActiveSampling(st.rs0.getPrimary());
+QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collUuid);
// Wait for at least one refresh interval to make the inactive mongos find out that its sample rate
// is 0.
sleep(2 * queryAnalysisSamplerConfigurationRefreshSecs);
@@ -133,9 +132,7 @@ const state4 = runCommandAndAssertCurrentOpAndServerStatus(opKindWrite, cmdObj4,
// Stop query sampling.
assert.commandWorked(st.s0.adminCommand({configureQueryAnalyzer: ns, mode: "off"}));
-QuerySamplingUtil.waitForInactiveSampling(st.s0);
-QuerySamplingUtil.waitForInactiveSampling(st.s1);
-QuerySamplingUtil.waitForInactiveSampling(st.rs0.getPrimary());
+QuerySamplingUtil.waitForInactiveSamplingShardedCluster(st, ns, collUuid);
const expectedFinalState = Object.assign({}, state4, true /* deep */);
expectedFinalState.mongos0.currentOp = [];
diff --git a/jstests/sharding/analyze_shard_key/shard_key_updates.js b/jstests/sharding/analyze_shard_key/shard_key_updates.js
index d5992154c98..27c27af4705 100644
--- a/jstests/sharding/analyze_shard_key/shard_key_updates.js
+++ b/jstests/sharding/analyze_shard_key/shard_key_updates.js
@@ -113,7 +113,7 @@ function runTest({isShardedColl, execCtxType}) {
const collectionUuid = QuerySamplingUtil.getCollectionUuid(mongosDB, collName);
assert.commandWorked(st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate}));
- QuerySamplingUtil.waitForActiveSampling(st.s);
+ QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collectionUuid);
// Test with a mix of modifier, replacement and pipeline updates and findAndModify updates.
let numUpdates = 0;
@@ -222,8 +222,7 @@ function runTest({isShardedColl, execCtxType}) {
// preventing the internal aggregate commands run by the analyzeShardKey commands below from
// getting sampled.
assert.commandWorked(st.s.adminCommand({configureQueryAnalyzer: ns, mode: "off"}));
- QuerySamplingUtil.waitForInactiveSampling(st.s);
- QuerySamplingUtil.waitForInactiveSamplingOnAllShards(st);
+ QuerySamplingUtil.waitForInactiveSamplingShardedCluster(st, ns, collectionUuid);
let numTotal = numUpdates + numFindAndModifys;
assert.soon(() => {
@@ -240,7 +239,7 @@ function runTest({isShardedColl, execCtxType}) {
assert.eq(res0.writeDistribution.percentageOfShardKeyUpdates, 100, res0);
assert.commandWorked(st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate}));
- QuerySamplingUtil.waitForActiveSampling(st.s);
+ QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collectionUuid);
// Below are not shard key updates.
@@ -293,8 +292,7 @@ function runTest({isShardedColl, execCtxType}) {
// preventing the internal aggregate commands run by the analyzeShardKey commands below from
// getting sampled.
assert.commandWorked(st.s.adminCommand({configureQueryAnalyzer: ns, mode: "off"}));
- QuerySamplingUtil.waitForInactiveSampling(st.s);
- QuerySamplingUtil.waitForInactiveSamplingOnAllShards(st);
+ QuerySamplingUtil.waitForInactiveSamplingShardedCluster(st, ns, collectionUuid);
numTotal = numUpdates + numFindAndModifys;
assert.soon(() => {
diff --git a/jstests/sharding/analyze_shard_key/ttl_delete_samples.js b/jstests/sharding/analyze_shard_key/ttl_delete_samples.js
index dd153ca0ff8..10f05677d40 100644
--- a/jstests/sharding/analyze_shard_key/ttl_delete_samples.js
+++ b/jstests/sharding/analyze_shard_key/ttl_delete_samples.js
@@ -9,19 +9,30 @@
load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js");
+const queryAnalysisSamplerConfigurationRefreshSecs = 1;
+const queryAnalysisWriterIntervalSecs = 1;
+// To speed up the test, make the sampled query documents expire right away. To prevent the
+// documents from being deleted before the count is verified, make the TTL monitor have a large
+// sleep interval at first and then lower it at the end of the test when verifying that the
+// documents do get deleted by the TTL monitor.
+const queryAnalysisSampleExpirationSecs = 1;
+const ttlMonitorSleepSecs = 3600;
+
const st = new ShardingTest({
shards: 1,
rs: {
nodes: 2,
setParameter: {
- queryAnalysisWriterIntervalSecs: 1,
- queryAnalysisSampleExpirationSecs: 2,
+ queryAnalysisSamplerConfigurationRefreshSecs,
+ queryAnalysisWriterIntervalSecs,
+ queryAnalysisSampleExpirationSecs,
+ ttlMonitorSleepSecs,
logComponentVerbosity: tojson({sharding: 2})
}
},
mongosOptions: {
setParameter: {
- queryAnalysisSamplerConfigurationRefreshSecs: 1,
+ queryAnalysisSamplerConfigurationRefreshSecs,
}
},
});
@@ -41,12 +52,12 @@ for (let i = 0; i < kNumDocs; i++) {
bulk.insert({x: i, y: i});
}
assert.commandWorked(bulk.execute());
+const collUuid = QuerySamplingUtil.getCollectionUuid(testDB, collName);
// Enable query sampling
assert.commandWorked(
st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 1000}));
-
-QuerySamplingUtil.waitForActiveSampling(st.s);
+QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collUuid);
// Find each document
for (let i = 0; i < kNumDocs; i++) {
@@ -71,9 +82,10 @@ assert.soon(() => {
});
printjson({"numQueryDocs": numQueryDocs, "numDiffDocs": numDiffDocs});
+// Lower the TTL monitor sleep interval.
assert.commandWorked(shard0Primary.adminCommand({setParameter: 1, ttlMonitorSleepSecs: 1}));
-// Assert that query sample documents have been deleted
+// Assert that query sample documents are eventually deleted.
assert.soon(() => {
return (QuerySamplingUtil.getNumSampledQueryDocuments(st) == 0 &&
QuerySamplingUtil.getNumSampledQueryDiffDocuments(st) == 0);