author     Ivan Fefer <ivan.fefer@mongodb.com>               2022-11-02 14:08:39 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-11-02 14:41:10 +0000
commit     76e583ed12d536aba872920568b5324d1dcae713 (patch)
tree       bf548e3dd26a94e2d38b85924bdfd8293bdc9686
parent     f509f99f4e55e38397a7bed3070a715ad6a7dcf9 (diff)
download   mongo-76e583ed12d536aba872920568b5324d1dcae713.tar.gz
SERVER-58276 Add collscan plan if collection is clustered and collscan uses clustered index
-rw-r--r--  jstests/core/cover_null_queries.js                                 16
-rw-r--r--  jstests/core/profile_update.js                                     27
-rw-r--r--  jstests/core/timeseries/timeseries_index_partial.js                25
-rw-r--r--  jstests/core/timeseries/timeseries_index_use.js                    25
-rw-r--r--  jstests/sharding/timeseries_query.js                                2
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp                               2
-rw-r--r--  src/mongo/db/pipeline/document_source_check_resume_token_test.cpp   4
-rw-r--r--  src/mongo/db/query/query_planner.cpp                               39
-rw-r--r--  src/mongo/dbtests/query_stage_batched_delete.cpp                   24
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp                          5
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp                             9
11 files changed, 112 insertions, 66 deletions
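
The behavioral change is easiest to see from the shell. Below is a minimal sketch (assuming a server built with this patch; the collection name `demo` is arbitrary): a clustered collection has no standalone _id index, and after this change a predicate that bounds the clustered key yields a bounded collection scan plan, reported as CLUSTERED_IXSCAN in explain output.

    // Create a collection clustered by _id; no separate _id index is built.
    assert.commandWorked(
        db.createCollection("demo", {clusteredIndex: {key: {_id: 1}, unique: true}}));
    assert.commandWorked(db.demo.insert([{_id: 1}, {_id: 2}, {_id: 3}]));
    // A bound on _id lets the planner emit a bounded collection scan over the
    // clustered index as a candidate plan.
    const explain = db.demo.find({_id: {$gte: 2}}).explain();
    assert.eq("CLUSTERED_IXSCAN", explain.queryPlanner.winningPlan.stage, tojson(explain));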
diff --git a/jstests/core/cover_null_queries.js b/jstests/core/cover_null_queries.js
index c17b9e71929..7054a16039d 100644
--- a/jstests/core/cover_null_queries.js
+++ b/jstests/core/cover_null_queries.js
@@ -11,6 +11,7 @@
load("jstests/aggregation/extras/utils.js"); // For arrayEq().
load("jstests/libs/analyze_plan.js"); // For getAggPlanStages() and getPlanStages().
+load("jstests/libs/clustered_collections/clustered_collection_util.js");
const coll = db.cover_null_queries;
coll.drop();
@@ -95,6 +96,15 @@ function validateCountAggCmdOutputAndPlan({filter, expectedStages, expectedCount
validateStages({cmdObj, expectedStages, isAgg: true});
}
+function getExpectedStagesIndexScanAndFetch(extraStages) {
+ const clustered = ClusteredCollectionUtil.areAllCollectionsClustered(db.getMongo());
+ const result = clustered ? {"CLUSTERED_IXSCAN": 1} : {"FETCH": 1, "IXSCAN": 1};
+ for (const stage in extraStages) {
+ result[stage] = extraStages[stage];
+ }
+ return result;
+}
+
assert.commandWorked(coll.createIndex({a: 1, _id: 1}));
// Verify count({a: null}) can be covered by an index. In the simplest case we can use two count
@@ -272,18 +282,18 @@ validateFindCmdOutputAndPlan({
validateSimpleCountCmdOutputAndPlan({
filter: {a: null, _id: 3},
expectedCount: 1,
- expectedStages: {"FETCH": 1, "IXSCAN": 1, "OR": 0, "COUNT_SCAN": 0}
+ expectedStages: getExpectedStagesIndexScanAndFetch({"OR": 0, "COUNT_SCAN": 0}),
});
validateCountAggCmdOutputAndPlan({
filter: {a: null, _id: 3},
expectedCount: 1,
- expectedStages: {"FETCH": 1, "IXSCAN": 1, "OR": 0, "COUNT_SCAN": 0},
+ expectedStages: getExpectedStagesIndexScanAndFetch({"OR": 0, "COUNT_SCAN": 0}),
});
validateFindCmdOutputAndPlan({
filter: {a: null, _id: 3},
projection: {_id: 1},
expectedOutput: [{_id: 3}],
- expectedStages: {"IXSCAN": 1, "FETCH": 1, "PROJECTION_SIMPLE": 1},
+ expectedStages: getExpectedStagesIndexScanAndFetch({"PROJECTION_SIMPLE": 1}),
});
// Verify that if the index is multikey and the query searches for null and empty array values, then
diff --git a/jstests/core/profile_update.js b/jstests/core/profile_update.js
index deeeb8ee8a3..ebd0505e58d 100644
--- a/jstests/core/profile_update.js
+++ b/jstests/core/profile_update.js
@@ -100,12 +100,9 @@ assert.commandWorked(coll.update({_id: "new value", a: 4}, {$inc: {b: 1}}, {upse
profileObj = getLatestProfilerEntry(testDB);
const collectionIsClustered = ClusteredCollectionUtil.areAllCollectionsClustered(db.getMongo());
-// A clustered collection has no actual index on _id. While a bounded collection scan is in
-// principle an efficient option, the query planner only defaults to collection scan if no suitable
-// index is available.
-const expectedPlan = collectionIsClustered ? "IXSCAN { a: 1 }" : "IXSCAN { _id: 1 }";
-const expectedKeysExamined = collectionIsClustered ? 1 : 0;
-const expectedDocsExamined = expectedKeysExamined;
+const expectedPlan = collectionIsClustered ? "CLUSTERED_IXSCAN" : "IXSCAN { _id: 1 }";
+const expectedKeysExamined = 0;
+const expectedDocsExamined = collectionIsClustered ? 1 : 0;
const expectedKeysInserted = collectionIsClustered ? 1 : 2;
assert.eq(profileObj.command,
@@ -149,13 +146,17 @@ for (var i = 0; i < indices.length; i++) {
const profileObj = profiles[i];
const index = indices[i];
- // A clustered collection has no actual index on _id. While a bounded collection scan is in
- // principle an efficient option, the query planner only defaults to collection scan if no
- // suitable index is available.
- const expectedPlan = collectionIsClustered ? "IXSCAN { a: 1 }" : "IXSCAN { _id: 1 }";
- const expectedKeysExamined = collectionIsClustered ? 1 : 0;
- const expectedDocsExamined = expectedKeysExamined;
- const expectedKeysInserted = collectionIsClustered ? 1 : 2;
+ let expectedPlan = "IXSCAN { _id: 1 }";
+ let expectedKeysExamined = 0;
+ let expectedDocsExamined = 0;
+ let expectedKeysInserted = 2;
+
+ if (collectionIsClustered) {
+ expectedPlan = "CLUSTERED_IXSCAN";
+ expectedKeysExamined = 0;
+ expectedDocsExamined = (i + 1 == indices.length) ? 1 : 2;
+ expectedKeysInserted = 1;
+ }
assert.eq(
profileObj.command,
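
The expectation above can also be checked interactively through the profiler; a small sketch, assuming a test database `testDB` with profiling enabled and a collection `coll` that may or may not be clustered:

    testDB.setProfilingLevel(2);
    assert.commandWorked(coll.update({_id: "new value", a: 4}, {$inc: {b: 1}}, {upsert: true}));
    // The most recent profiler entry records which plan served the upsert:
    // "CLUSTERED_IXSCAN" on a clustered collection, "IXSCAN { _id: 1 }" otherwise.
    const entry = testDB.system.profile.find().sort({ts: -1}).limit(1).next();
    print(entry.planSummary);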
diff --git a/jstests/core/timeseries/timeseries_index_partial.js b/jstests/core/timeseries/timeseries_index_partial.js
index 55ece446457..7d97209173e 100644
--- a/jstests/core/timeseries/timeseries_index_partial.js
+++ b/jstests/core/timeseries/timeseries_index_partial.js
@@ -97,10 +97,24 @@ assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression:
// Test creating and using a partial index.
{
- // Make sure the query uses the {a: 1} index.
+ let ixscanInWinningPlan = 0;
+
+ // Make sure the {a: 1} index was considered for this query.
function checkPlan(predicate) {
const explain = coll.find(predicate).explain();
- const scan = getAggPlanStage(explain, 'IXSCAN');
+ let scan = getAggPlanStage(explain, 'IXSCAN');
+ // If no index scan is in the winning plan, check the rejected plans.
+ if (scan === null) {
+ const rejectedPlans = getRejectedPlans(getAggPlanStage(explain, "$cursor")["$cursor"]);
+ if (rejectedPlans.length === 1) {
+ const scans = getPlanStages(getRejectedPlan(rejectedPlans[0]), "IXSCAN");
+ if (scans.length === 1) {
+ scan = scans[0];
+ }
+ }
+ } else {
+ ixscanInWinningPlan++;
+ }
const indexes = buckets.getIndexes();
assert(scan,
"Expected an index scan for predicate: " + tojson(predicate) +
@@ -109,7 +123,7 @@ assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression:
}
// Make sure the query results match a collection-scan plan.
function checkResults(predicate) {
- const result = coll.aggregate({$match: predicate}).toArray();
+ const result = coll.aggregate([{$match: predicate}], {hint: {a: 1}}).toArray();
const unindexed =
coll.aggregate([{$_internalInhibitOptimization: {}}, {$match: predicate}]).toArray();
assert.docEq(result, unindexed);
@@ -152,10 +166,6 @@ assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression:
const t1 = ISODate('2000-01-01T00:00:01Z');
const t2 = ISODate('2000-01-01T00:00:02Z');
- // When the collection is sharded, there is an index on time that can win, instead of the
- // partial index. So only check the results in that case, not the plan.
- const check = FixtureHelpers.isSharded(buckets) ? checkResults : checkPlanAndResults;
-
assert.commandWorked(coll.dropIndex({a: 1}));
assert.commandWorked(
coll.createIndex({a: 1}, {partialFilterExpression: {[timeField]: {$lt: t1}}}));
@@ -184,6 +194,7 @@ assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression:
assert.commandWorked(coll.dropIndex({a: 1}));
assert.sameMembers(coll.getIndexes(), extraIndexes);
assert.sameMembers(buckets.getIndexes(), extraBucketIndexes);
+ assert.gt(ixscanInWinningPlan, 0);
}
// Check that partialFilterExpression can use a mixture of metadata, time, and measurement fields,
diff --git a/jstests/core/timeseries/timeseries_index_use.js b/jstests/core/timeseries/timeseries_index_use.js
index b1106e4ead0..ebcea9d8e6b 100644
--- a/jstests/core/timeseries/timeseries_index_use.js
+++ b/jstests/core/timeseries/timeseries_index_use.js
@@ -58,8 +58,9 @@ const generateTest = (useHint) => {
/**
* Creates the index specified by the spec and options, then explains the query to ensure
- * that the created index is used. Runs the query and verifies that the expected number of
- * documents are matched. Finally, deletes the created index.
+ * that the created index is used or was at least considered by the multi-planner.
+ * Runs the query and verifies that the expected number of documents are matched.
+ * Finally, deletes the created index.
*/
const testQueryUsesIndex = function(
filter, numMatches, indexSpec, indexOpts = {}, queryOpts = {}) {
@@ -75,9 +76,23 @@ const generateTest = (useHint) => {
assert.eq(numMatches, query.itcount());
const explain = query.explain();
- const ixscan = getAggPlanStage(explain, "IXSCAN");
- assert.neq(null, ixscan, tojson(explain));
- assert.eq("testIndexName", ixscan.indexName, tojson(ixscan));
+ if (useHint) {
+ const ixscan = getAggPlanStage(explain, "IXSCAN");
+ assert.neq(null, ixscan, tojson(explain));
+ assert.eq("testIndexName", ixscan.indexName, tojson(ixscan));
+ } else {
+ let ixscan = getAggPlanStage(explain, "IXSCAN");
+ // If no ixscan is in the winning plan, check the rejected plans.
+ if (ixscan === null) {
+ const rejectedPlans =
+ getRejectedPlans(getAggPlanStage(explain, "$cursor")["$cursor"]);
+ assert.eq(1, rejectedPlans.length);
+ const ixscans = getPlanStages(getRejectedPlan(rejectedPlans[0]), "IXSCAN");
+ assert.eq(1, ixscans.length);
+ ixscan = ixscans[0];
+ }
+ assert.eq("testIndexName", ixscan.indexName, tojson(ixscan));
+ }
assert.commandWorked(coll.dropIndex("testIndexName"));
};
diff --git a/jstests/sharding/timeseries_query.js b/jstests/sharding/timeseries_query.js
index 4cf3b1c28c5..7a8b835c876 100644
--- a/jstests/sharding/timeseries_query.js
+++ b/jstests/sharding/timeseries_query.js
@@ -122,7 +122,7 @@ function runQuery(
if (expectCollScan) {
assert(isCollscan(sDB, winningPlan));
} else {
- assert(isIxscan(sDB, winningPlan));
+ assert(isIxscan(sDB, winningPlan) || isClusteredIxscan(sDB, winningPlan));
}
});
});
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index 1bee034cd0c..6cbacb0c997 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -206,8 +206,6 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
<< "recordId: " << recordIdToSeek);
}
}
-
- return PlanStage::NEED_TIME;
}
if (_lastSeenId.isNull() && _params.direction == CollectionScanParams::FORWARD &&
diff --git a/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp b/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp
index e93e68aa0ca..d0fc5305029 100644
--- a/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp
+++ b/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp
@@ -206,10 +206,6 @@ protected:
if (!_collScan) {
_collScan = std::make_unique<CollectionScan>(
pExpCtx.get(), _collectionPtr, _params, &_ws, _filter.get());
- // The first call to doWork will create the cursor and return NEED_TIME. But it won't
- // actually scan any of the documents that are present in the mock cursor queue.
- ASSERT_EQ(_collScan->doWork(nullptr), PlanStage::NEED_TIME);
- ASSERT_EQ(_getNumDocsTested(), 0);
}
while (true) {
// If the next result is a pause, return it and don't collscan.
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 6902650a1ef..a3a63e66449 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -399,6 +399,11 @@ StatusWith<std::unique_ptr<QuerySolution>> tryToBuildColumnScan(
}
return Status{ErrorCodes::Error{6298502}, "columnstore index is not applicable for this query"};
}
+
+bool collscanIsBounded(const CollectionScanNode* collscan) {
+ return collscan->minRecord || collscan->maxRecord;
+}
+
} // namespace
using std::numeric_limits;
@@ -615,13 +620,23 @@ static BSONObj finishMaxObj(const IndexEntry& indexEntry,
}
}
+std::pair<std::unique_ptr<QuerySolution>, const CollectionScanNode*> buildCollscanSolnWithNode(
+ const CanonicalQuery& query,
+ bool tailable,
+ const QueryPlannerParams& params,
+ int direction = 1) {
+ std::unique_ptr<QuerySolutionNode> solnRoot(
+ QueryPlannerAccess::makeCollectionScan(query, tailable, params, direction));
+ const auto* collscanNode = checked_cast<const CollectionScanNode*>(solnRoot.get());
+ return std::make_pair(
+ QueryPlannerAnalysis::analyzeDataAccess(query, params, std::move(solnRoot)), collscanNode);
+}
+
std::unique_ptr<QuerySolution> buildCollscanSoln(const CanonicalQuery& query,
bool tailable,
const QueryPlannerParams& params,
int direction = 1) {
- std::unique_ptr<QuerySolutionNode> solnRoot(
- QueryPlannerAccess::makeCollectionScan(query, tailable, params, direction));
- return QueryPlannerAnalysis::analyzeDataAccess(query, params, std::move(solnRoot));
+ return buildCollscanSolnWithNode(query, tailable, params, direction).first;
}
std::unique_ptr<QuerySolution> buildWholeIXSoln(
@@ -1518,6 +1533,8 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
"No indexed plans available, and running with 'notablescan'");
}
+ bool clusteredCollection = params.clusteredInfo.has_value();
+
// geoNear and text queries *require* an index.
// Also, if a hint is specified it indicates that we MUST use it.
bool possibleToCollscan =
@@ -1527,21 +1544,23 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan(
return Status(ErrorCodes::NoQueryExecutionPlans, "No query solutions");
}
- if (possibleToCollscan && (collscanRequested || collScanRequired)) {
- auto collscan = buildCollscanSoln(query, isTailable, params);
- if (!collscan && collScanRequired) {
+ if (possibleToCollscan && (collscanRequested || collScanRequired || clusteredCollection)) {
+ auto [collscanSoln, collscanNode] = buildCollscanSolnWithNode(query, isTailable, params);
+ if (!collscanSoln && collScanRequired) {
return Status(ErrorCodes::NoQueryExecutionPlans,
"Failed to build collection scan soln");
}
- if (collscan) {
+
+ if (collscanSoln &&
+ (collscanRequested || collScanRequired || collscanIsBounded(collscanNode))) {
LOGV2_DEBUG(20984,
5,
"Planner: outputting a collection scan",
- "collectionScan"_attr = redact(collscan->toString()));
+ "collectionScan"_attr = redact(collscanSoln->toString()));
SolutionCacheData* scd = new SolutionCacheData();
scd->solnType = SolutionCacheData::COLLSCAN_SOLN;
- collscan->cacheData.reset(scd);
- out.push_back(std::move(collscan));
+ collscanSoln->cacheData.reset(scd);
+ out.push_back(std::move(collscanSoln));
}
}
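
As a shell-level illustration of the new gating (a sketch, reusing the hypothetical clustered collection `demo` from above plus a secondary index on `a`; exact winners depend on the multi-planner):

    assert.commandWorked(db.demo.createIndex({a: 1}));
    // Bounded on _id: a CLUSTERED_IXSCAN candidate is generated alongside the
    // {a: 1} plan, so multi-planning leaves one of them in rejectedPlans.
    const bounded = db.demo.find({a: 1, _id: {$lt: 10}}).explain();
    assert.gt(bounded.queryPlanner.rejectedPlans.length, 0, tojson(bounded));
    // Unbounded on _id: no collection scan candidate is added (it was neither
    // requested nor required), so the {a: 1} plan is the only solution.
    const unbounded = db.demo.find({a: 1}).explain();
    assert.eq(0, unbounded.queryPlanner.rejectedPlans.length, tojson(unbounded));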
diff --git a/src/mongo/dbtests/query_stage_batched_delete.cpp b/src/mongo/dbtests/query_stage_batched_delete.cpp
index be980ae883b..f036e5e3d14 100644
--- a/src/mongo/dbtests/query_stage_batched_delete.cpp
+++ b/src/mongo/dbtests/query_stage_batched_delete.cpp
@@ -252,9 +252,9 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchDocsBasic) {
ASSERT_EQUALS(state, PlanStage::NEED_TIME);
// Only delete documents once the current batch reaches targetBatchDocs.
+ nIterations++;
int batch = nIterations / (int)targetBatchDocs;
ASSERT_EQUALS(stats->docsDeleted, targetBatchDocs * batch);
- nIterations++;
}
// There should be 2 more docs deleted by the time the command returns EOF.
@@ -556,7 +556,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchTimeMSBasic) {
// targetBatchDocs.
{
ASSERT_LTE(nDocs, targetBatchDocs);
- for (auto i = 0; i <= nDocs; i++) {
+ for (auto i = 0; i < nDocs; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_EQ(state, PlanStage::NEED_TIME);
@@ -634,7 +634,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchTimeMSWithTargetBatc
// Stages up to targetBatchDocs - 1 documents in the buffer.
{
- for (auto i = 0; i < targetBatchDocs; i++) {
+ for (auto i = 0; i < targetBatchDocs - 1; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_EQ(state, PlanStage::NEED_TIME);
@@ -711,10 +711,9 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassDocsBasic) {
PlanStage::StageState state = PlanStage::NEED_TIME;
WorkingSetID id = WorkingSet::INVALID_ID;
- // Stages up to 'targetBatchDocs' - 1 documents in the buffer. The first work() initiates the
- // collection scan and doesn't fetch a document to stage.
+ // Stages up to 'targetBatchDocs' - 1 documents in the buffer.
{
- for (auto i = 0; i < targetBatchDocs; i++) {
+ for (auto i = 0; i < targetBatchDocs - 1; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_FALSE(stats->passTargetMet);
@@ -784,7 +783,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassDocsWithUnlimitedBatc
// Stage a batch of documents (all the documents).
{
- for (auto i = 0; i <= nDocs; i++) {
+ for (auto i = 0; i < nDocs; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_FALSE(stats->passTargetMet);
@@ -822,7 +821,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSBasic) {
batchedDeleteParams->targetBatchTimeMS = Milliseconds(0);
batchedDeleteParams->targetBatchDocs = targetBatchDocs;
- auto targetPassTimeMS = Milliseconds(3);
+ auto targetPassTimeMS = Milliseconds(targetBatchDocs - 1);
batchedDeleteParams->targetPassTimeMS = targetPassTimeMS;
auto deleteStage =
@@ -835,7 +834,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSBasic) {
// Stages the first batch.
{
- for (auto i = 0; i < targetBatchDocs; i++) {
+ for (auto i = 0; i < targetBatchDocs - 1; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_FALSE(stats->passTargetMet);
@@ -882,7 +881,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSWithUnlimitedBa
// Stages the first batch (all the documents).
{
- for (auto i = 0; i <= nDocs; i++) {
+ for (auto i = 0; i < nDocs; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_FALSE(stats->passTargetMet);
@@ -974,10 +973,9 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSReachedBeforeTa
// Track the total amount of time the pass takes.
Timer passTimer(tickSource());
- // Stages up to 'targetBatchDocs' - 1 documents in the buffer. The first work() initiates the
- // collection scan and doesn't fetch a document to stage.
+ // Stages up to 'targetBatchDocs' - 1 documents in the buffer.
{
- for (auto i = 0; i < targetBatchDocs; i++) {
+ for (auto i = 0; i < targetBatchDocs - 1; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_FALSE(stats->passTargetMet);
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 2cec88a4e0c..64bb596769c 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -565,11 +565,6 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekSuc
std::unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
_expCtx.get(), collection.getCollection(), params, ws.get(), nullptr);
- WorkingSetID id = WorkingSet::INVALID_ID;
-
- // Check that the resume succeeds in making the cursor.
- ASSERT_EQUALS(PlanStage::NEED_TIME, ps->work(&id));
-
// Run the rest of the scan and verify the results.
auto statusWithPlanExecutor =
plan_executor_factory::make(_expCtx,
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 128426f77ee..6fe3479d4d9 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -325,12 +325,15 @@ public:
class QueryStageCountUpdateDuringYield : public CountStageTest {
public:
void run() {
- // expected count would be kDocuments-2 but we update the first and second records
- // after doing the first unit of work so they wind up getting counted later on
CountCommandRequest request((NamespaceString(ns())));
request.setQuery(BSON("x" << GTE << 2));
- testCount(request, kDocuments);
+ // We call 'interject' after the first unit of work, which skips the first
+ // document, so it is not counted.
+ testCount(request, kDocuments - 1);
+
+ // We call 'interject' after the first unit of work; even though some documents are
+ // skipped, they are added to the end of the index on x, so they are counted later.
testCount(request, kDocuments, true);
}