author     Ivan Fefer <ivan.fefer@mongodb.com>               2022-11-02 14:08:39 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-11-02 14:41:10 +0000
commit     76e583ed12d536aba872920568b5324d1dcae713 (patch)
tree       bf548e3dd26a94e2d38b85924bdfd8293bdc9686 /src/mongo/dbtests
parent     f509f99f4e55e38397a7bed3070a715ad6a7dcf9 (diff)
download   mongo-76e583ed12d536aba872920568b5324d1dcae713.tar.gz
SERVER-58276 Add collscan plan if collection is clustered and collscan uses clustered index
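For context: when a collection is clustered and the query predicate bounds the cluster key, a bounded collection scan becomes a candidate plan alongside any index scans, which appears to be why the first work() call in the affected tests below now behaves differently. The following standalone sketch models that planning decision; every type and name in it is a hypothetical stand-in, not MongoDB source.

// Standalone sketch, not MongoDB source: hypothetical toy types modelling the
// planner decision "also enumerate a collection-scan plan when the collection
// is clustered and the predicate bounds the cluster key".
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct QueryBounds {
    std::string field;          // field the predicate constrains
    std::optional<int> minKey;  // inclusive lower bound, if any
    std::optional<int> maxKey;  // inclusive upper bound, if any
};

struct CollectionInfo {
    bool clustered = false;       // is the collection clustered?
    std::string clusterKeyField;  // e.g. "_id"
};

// Names of the candidate plans a toy planner would enumerate.
std::vector<std::string> enumeratePlans(const CollectionInfo& coll, const QueryBounds& bounds) {
    std::vector<std::string> plans{"IXSCAN on a secondary index"};
    // A clustered collection stores documents in cluster-key order, so a
    // bounded collection scan can answer cluster-key predicates directly.
    if (coll.clustered && bounds.field == coll.clusterKeyField &&
        (bounds.minKey || bounds.maxKey)) {
        plans.push_back("bounded COLLSCAN over the clustered index");
    }
    return plans;
}

int main() {
    CollectionInfo coll{true, "_id"};
    QueryBounds bounds{"_id", 5, std::nullopt};  // models {_id: {$gte: 5}}
    for (const auto& plan : enumeratePlans(coll, bounds))
        std::cout << plan << '\n';
}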
Diffstat (limited to 'src/mongo/dbtests')
-rw-r--r--  src/mongo/dbtests/query_stage_batched_delete.cpp  24
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp          5
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp             9
3 files changed, 17 insertions(+), 21 deletions(-)
diff --git a/src/mongo/dbtests/query_stage_batched_delete.cpp b/src/mongo/dbtests/query_stage_batched_delete.cpp
index be980ae883b..f036e5e3d14 100644
--- a/src/mongo/dbtests/query_stage_batched_delete.cpp
+++ b/src/mongo/dbtests/query_stage_batched_delete.cpp
@@ -252,9 +252,9 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchDocsBasic) {
ASSERT_EQUALS(state, PlanStage::NEED_TIME);
// Only delete documents once the current batch reaches targetBatchDocs.
+ nIterations++;
int batch = nIterations / (int)targetBatchDocs;
ASSERT_EQUALS(stats->docsDeleted, targetBatchDocs * batch);
- nIterations++;
}
// There should be 2 more docs deleted by the time the command returns EOF.
@@ -556,7 +556,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchTimeMSBasic) {
// targetBatchDocs.
{
ASSERT_LTE(nDocs, targetBatchDocs);
- for (auto i = 0; i <= nDocs; i++) {
+ for (auto i = 0; i < nDocs; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_EQ(state, PlanStage::NEED_TIME);
@@ -634,7 +634,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchTimeMSWithTargetBatc
// Stages up to targetBatchDocs - 1 documents in the buffer.
{
- for (auto i = 0; i < targetBatchDocs; i++) {
+ for (auto i = 0; i < targetBatchDocs - 1; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_EQ(state, PlanStage::NEED_TIME);
@@ -711,10 +711,9 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassDocsBasic) {
PlanStage::StageState state = PlanStage::NEED_TIME;
WorkingSetID id = WorkingSet::INVALID_ID;
- // Stages up to 'targetBatchDocs' - 1 documents in the buffer. The first work() initiates the
- // collection scan and doesn't fetch a document to stage.
+ // Stages up to 'targetBatchDocs' - 1 documents in the buffer.
{
- for (auto i = 0; i < targetBatchDocs; i++) {
+ for (auto i = 0; i < targetBatchDocs - 1; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_FALSE(stats->passTargetMet);
@@ -784,7 +783,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassDocsWithUnlimitedBatc
// Stage a batch of documents (all the documents).
{
- for (auto i = 0; i <= nDocs; i++) {
+ for (auto i = 0; i < nDocs; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_FALSE(stats->passTargetMet);
@@ -822,7 +821,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSBasic) {
batchedDeleteParams->targetBatchTimeMS = Milliseconds(0);
batchedDeleteParams->targetBatchDocs = targetBatchDocs;
- auto targetPassTimeMS = Milliseconds(3);
+ auto targetPassTimeMS = Milliseconds(targetBatchDocs - 1);
batchedDeleteParams->targetPassTimeMS = targetPassTimeMS;
auto deleteStage =
@@ -835,7 +834,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSBasic) {
// Stages the first batch.
{
- for (auto i = 0; i < targetBatchDocs; i++) {
+ for (auto i = 0; i < targetBatchDocs - 1; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_FALSE(stats->passTargetMet);
@@ -882,7 +881,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSWithUnlimitedBa
// Stages the first batch (all the documents).
{
- for (auto i = 0; i <= nDocs; i++) {
+ for (auto i = 0; i < nDocs; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_FALSE(stats->passTargetMet);
@@ -974,10 +973,9 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSReachedBeforeTa
// Track the total amount of time the pass takes.
Timer passTimer(tickSource());
- // Stages up to 'targetBatchDocs' - 1 documents in the buffer. The first work() initiates the
- // collection scan and doesn't fetch a document to stage.
+ // Stages up to 'targetBatchDocs' - 1 documents in the buffer.
{
- for (auto i = 0; i < targetBatchDocs; i++) {
+ for (auto i = 0; i < targetBatchDocs - 1; i++) {
state = deleteStage->work(&id);
ASSERT_EQ(stats->docsDeleted, 0);
ASSERT_FALSE(stats->passTargetMet);
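The recurring loop-bound change above, from i < targetBatchDocs / i <= nDocs to i < targetBatchDocs - 1 / i < nDocs, follows from the removed comments: the first work() call now stages a document instead of only initiating the collection scan, so one fewer call is needed before a batch fills. A minimal, self-contained sketch of that arithmetic (hypothetical names, not the real PlanStage/BatchedDeleteStage API):

// Hypothetical sketch of the staging arithmetic exercised by the tests above:
// each work() call stages one document, and the buffered batch is deleted once
// 'targetBatchDocs' documents have been staged.
#include <cassert>
#include <cstddef>

struct ToyBatchedDelete {
    std::size_t targetBatchDocs;
    std::size_t staged = 0;
    std::size_t docsDeleted = 0;

    // One unit of work: stage a document, flush when the batch is full.
    void work() {
        ++staged;
        if (staged == targetBatchDocs) {
            docsDeleted += staged;
            staged = 0;
        }
    }
};

int main() {
    ToyBatchedDelete stage{/*targetBatchDocs=*/10};
    // After 'targetBatchDocs' - 1 calls, nothing has been deleted yet...
    for (std::size_t i = 0; i < stage.targetBatchDocs - 1; ++i) {
        stage.work();
        assert(stage.docsDeleted == 0);
    }
    // ...and the next call fills the batch and flushes it.
    stage.work();
    assert(stage.docsDeleted == 10);
}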
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 2cec88a4e0c..64bb596769c 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -565,11 +565,6 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekSuc
std::unique_ptr<PlanStage> ps = std::make_unique<CollectionScan>(
_expCtx.get(), collection.getCollection(), params, ws.get(), nullptr);
- WorkingSetID id = WorkingSet::INVALID_ID;
-
- // Check that the resume succeeds in making the cursor.
- ASSERT_EQUALS(PlanStage::NEED_TIME, ps->work(&id));
-
// Run the rest of the scan and verify the results.
auto statusWithPlanExecutor =
plan_executor_factory::make(_expCtx,
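One plausible reading of this removal, consistent with the comments dropped in query_stage_batched_delete.cpp: after this change the first work() call of a resumed collection scan can already return a document rather than merely creating the cursor, so asserting NEED_TIME on it no longer holds. A toy illustration (hypothetical types, not the real PlanStage interface):

// Hypothetical illustration: a scan that seeks to its resume point and returns
// a document in the same work() call breaks an assertion that the first call
// only sets up the cursor.
#include <cassert>

enum class StageState { ADVANCED, NEED_TIME, IS_EOF };

struct ToyResumedScan {
    bool seeksAndReturnsOnFirstWork;  // assumed new behavior in this sketch
    bool positioned = false;
    int remaining = 3;                // documents left after the resume point

    StageState work() {
        if (!positioned) {
            positioned = true;
            if (!seeksAndReturnsOnFirstWork)
                return StageState::NEED_TIME;  // old behavior: cursor setup only
        }
        if (remaining == 0)
            return StageState::IS_EOF;
        --remaining;
        return StageState::ADVANCED;  // hands back a document
    }
};

int main() {
    ToyResumedScan oldScan{false};
    ToyResumedScan newScan{true};
    assert(oldScan.work() == StageState::NEED_TIME);  // the removed assertion held here
    assert(newScan.work() == StageState::ADVANCED);   // but not here
}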
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 128426f77ee..6fe3479d4d9 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -325,12 +325,15 @@ public:
class QueryStageCountUpdateDuringYield : public CountStageTest {
public:
void run() {
- // expected count would be kDocuments-2 but we update the first and second records
- // after doing the first unit of work so they wind up getting counted later on
CountCommandRequest request((NamespaceString(ns())));
request.setQuery(BSON("x" << GTE << 2));
- testCount(request, kDocuments);
+ // We call 'interject' after the first unit of work, which skips the first document, so
+ // that document is not counted.
+ testCount(request, kDocuments - 1);
+
+ // We call 'interject' after the first unit of work; even if some documents are skipped
+ // at that point, they are added to the end of the index on x, so they are counted later.
testCount(request, kDocuments, true);
}
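A hypothetical, self-contained model of the two expectations above (toy values; the real test's 'kDocuments', query, and 'interject' differ in detail): the first count walks records in record-id order and misses the update that arrives after the first document was already examined, while the second walks the index on x, where updated documents are re-keyed past the scan position and still get counted.

// Hypothetical model: documents have x = 0..kDocuments-1, the query is
// {x: {$gte: 2}}, and 'interject' runs after the first unit of work, updating
// the first two documents so that they match.
#include <cassert>
#include <cstddef>
#include <map>
#include <vector>

constexpr int kDocuments = 5;

// Forward scan in record-id order: a record the scan has already examined and
// rejected (doc 0, x = 0) is never revisited, even though the update later
// makes it match, so one document goes uncounted.
int countInRecordIdOrder() {
    std::vector<int> x{0, 1, 2, 3, 4};  // x value per record id
    int counted = 0;
    for (std::size_t rid = 0; rid < x.size(); ++rid) {
        if (rid == 1) {  // 'interject' fires after the first unit of work
            x[0] = 100;  // too late: record 0 was already examined
            x[1] = 101;  // still ahead of the scan, so it will be counted
        }
        if (x[rid] >= 2)
            ++counted;
    }
    return counted;
}

// Scan over the index on x: updating a document re-keys its index entry, so
// the entries for docs 0 and 1 reappear past the current scan position and
// are still counted.
int countInIndexOrder() {
    std::multimap<int, int> indexOnX{{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}};
    int counted = 0;
    bool interjected = false;
    for (auto it = indexOnX.lower_bound(2); it != indexOnX.end();) {
        ++counted;
        ++it;
        if (!interjected) {
            interjected = true;
            indexOnX.erase(0);         // drop the old entries for docs 0 and 1...
            indexOnX.erase(1);
            indexOnX.emplace(100, 0);  // ...and re-insert them at their new keys,
            indexOnX.emplace(101, 1);  // which sort after the scan position
        }
    }
    return counted;
}

int main() {
    assert(countInRecordIdOrder() == kDocuments - 1);  // first testCount expectation
    assert(countInIndexOrder() == kDocuments);         // second testCount expectation
}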