author     Haley Connelly <haley.connelly@mongodb.com>      2021-10-05 22:28:28 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-10-05 23:12:36 +0000
commit     2a1be4c86a90eff9023ab3cbf9978d8c105f42d7 (patch)
tree       c2b853af2976d88a5371a8e31a39303a0d608502 /src
parent     867f52afbb79bc00e35c70f8e0681b7d602f97b2 (diff)
download   mongo-2a1be4c86a90eff9023ab3cbf9978d8c105f42d7.tar.gz
SERVER-60287 Make clustered collection scan respect minimum bound
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/exec/collection_scan.cpp        23
-rw-r--r--  src/mongo/db/record_id_helpers.cpp            6
-rw-r--r--  src/mongo/db/record_id_helpers.h              1
-rw-r--r--  src/mongo/db/storage/key_string.cpp           7
-rw-r--r--  src/mongo/db/storage/key_string.h             1
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp  237
6 files changed, 260 insertions(+), 15 deletions(-)
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index 7f3d5bc446c..a1f416f97fd 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -252,13 +252,22 @@ void CollectionScan::assertTsHasNotFallenOffOplog(const Record& record) {
}
namespace {
-bool atEndOfRangeInclusive(const CollectionScanParams& params, const WorkingSetMember& member) {
+bool pastEndOfRange(const CollectionScanParams& params, const WorkingSetMember& member) {
if (params.direction == CollectionScanParams::FORWARD) {
return params.maxRecord && member.recordId > *params.maxRecord;
} else {
return params.minRecord && member.recordId < *params.minRecord;
}
}
+
+bool beforeStartOfRange(const CollectionScanParams& params, const WorkingSetMember& member) {
+ if (params.direction == CollectionScanParams::FORWARD) {
+ return params.minRecord && member.recordId < *params.minRecord;
+ } else {
+ return params.maxRecord && member.recordId > *params.maxRecord;
+ }
+}
+
} // namespace
PlanStage::StageState CollectionScan::returnIfMatches(WorkingSetMember* member,
@@ -269,13 +278,21 @@ PlanStage::StageState CollectionScan::returnIfMatches(WorkingSetMember* member,
// The 'minRecord' and 'maxRecord' bounds are always inclusive, even if the query predicate is
// an exclusive inequality like $gt or $lt. In such cases, we rely on '_filter' to either
// exclude or include the endpoints as required by the user's query.
- if (atEndOfRangeInclusive(_params, *member)) {
+ if (pastEndOfRange(_params, *member)) {
_workingSet->free(memberID);
_commonStats.isEOF = true;
return PlanStage::IS_EOF;
}
- if (Filter::passes(member, _filter)) {
+ // For clustered collections, seekNear() is allowed to return a record prior to the
+ // requested minRecord for a forward scan or after the requested maxRecord for a reverse
+ // scan. Ensure that we do not return a record out of the requested range. Require that the
+ // caller advance our cursor until it is positioned within the correct range.
+ //
+ // In the future, we could change seekNear() to always return a record after minRecord in the
+ // direction of the scan. However, tailable scans depend on the current behavior in order to
+ // mark their position for resuming the tailable scan later on.
+ if (!beforeStartOfRange(_params, *member) && Filter::passes(member, _filter)) {
if (_params.stopApplyingFilterAfterFirstMatch) {
_filter = nullptr;
}
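
The hunk above splits the old inclusive end-of-range check in two: pastEndOfRange() still terminates the scan, while the new beforeStartOfRange() only skips a record that seekNear() positioned ahead of the requested bound, letting the caller keep advancing the cursor. The standalone sketch below models that control flow with plain integers in place of RecordId and WorkingSetMember; ScanParams, boundedScan, and the sample ids are illustrative stand-ins, not MongoDB types.

#include <cassert>
#include <optional>
#include <vector>

namespace {
enum class Direction { kForward, kBackward };

struct ScanParams {
    Direction direction = Direction::kForward;
    std::optional<int> minRecord;  // inclusive lower bound
    std::optional<int> maxRecord;  // inclusive upper bound
};

bool pastEndOfRange(const ScanParams& p, int recordId) {
    return p.direction == Direction::kForward ? (p.maxRecord && recordId > *p.maxRecord)
                                              : (p.minRecord && recordId < *p.minRecord);
}

bool beforeStartOfRange(const ScanParams& p, int recordId) {
    return p.direction == Direction::kForward ? (p.minRecord && recordId < *p.minRecord)
                                              : (p.maxRecord && recordId > *p.maxRecord);
}

// Returns the ids a bounded scan would produce when the cursor was seeded before the minimum
// bound, as seekNear() on a clustered collection is allowed to do.
std::vector<int> boundedScan(const ScanParams& p, const std::vector<int>& cursor) {
    std::vector<int> out;
    for (int rid : cursor) {
        if (pastEndOfRange(p, rid))
            break;               // analogous to freeing the member and returning IS_EOF
        if (beforeStartOfRange(p, rid))
            continue;            // skip it and let the caller advance the cursor (NEED_TIME)
        out.push_back(rid);      // analogous to returning ADVANCED
    }
    return out;
}
}  // namespace

int main() {
    ScanParams params;
    params.minRecord = 10;
    params.maxRecord = 20;
    // seekNear() landed on 8, which is before the requested minimum of 10.
    assert((boundedScan(params, {8, 9, 10, 15, 20, 25}) == std::vector<int>{10, 15, 20}));
    return 0;
}
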
diff --git a/src/mongo/db/record_id_helpers.cpp b/src/mongo/db/record_id_helpers.cpp
index 103326a9f6a..e3dee4e66bc 100644
--- a/src/mongo/db/record_id_helpers.cpp
+++ b/src/mongo/db/record_id_helpers.cpp
@@ -109,6 +109,12 @@ RecordId keyForOID(OID oid) {
return RecordId(keyBuilder.getBuffer(), keyBuilder.getSize());
}
+RecordId keyForDate(Date_t date) {
+ KeyString::Builder keyBuilder(KeyString::Version::kLatestVersion);
+ keyBuilder.appendDate(date);
+ return RecordId(keyBuilder.getBuffer(), keyBuilder.getSize());
+}
+
void appendToBSONAs(RecordId rid, BSONObjBuilder* builder, StringData fieldName) {
rid.withFormat([&](RecordId::Null) { builder->appendNull(fieldName); },
[&](int64_t val) { builder->append(fieldName, val); },
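
keyForDate() mirrors keyForOID(): the value is run through KeyString, which encodes a type byte ahead of the value, so clustered RecordIds of different BSON types occupy disjoint, type-ordered ranges of the key space. That is why the tests later in this patch can use date-typed bounds to exclude every objectId-typed record. The sketch below models only that disjointness with a (typeTag, value) pair; TaggedKey and the tag constants are hypothetical stand-ins, not the real KeyString encoding, and the actual relative order of the types follows MongoDB's BSON comparison order rather than these illustrative values.

#include <cassert>
#include <utility>

namespace {
// Hypothetical stand-in for a KeyString-encoded RecordId: a type tag ordered ahead of the value,
// so keys of one BSON type never interleave with keys of another. Tag values are illustrative.
enum TypeTag { kObjectIdTag = 7, kDateTag = 9 };
using TaggedKey = std::pair<TypeTag, long long>;

// Inclusive bounds check, analogous to the RecordId comparisons in CollectionScan.
bool withinBounds(const TaggedKey& k, const TaggedKey& min, const TaggedKey& max) {
    return !(k < min) && !(max < k);
}
}  // namespace

int main() {
    const TaggedKey minDate{kDateTag, 0};          // think keyForDate(Date_t::min())
    const TaggedKey maxDate{kDateTag, 1'000'000};  // think keyForDate(Date_t::max())
    const TaggedKey someOid{kObjectIdTag, 42};     // a default ObjectId-based RecordId
    const TaggedKey someDate{kDateTag, 500};       // a Date-based RecordId

    assert(withinBounds(someDate, minDate, maxDate));  // date keys fall inside date bounds
    assert(!withinBounds(someOid, minDate, maxDate));  // objectId keys fall outside them
    return 0;
}
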
diff --git a/src/mongo/db/record_id_helpers.h b/src/mongo/db/record_id_helpers.h
index 7efe36ada9b..4e4e2d108f7 100644
--- a/src/mongo/db/record_id_helpers.h
+++ b/src/mongo/db/record_id_helpers.h
@@ -51,6 +51,7 @@ StatusWith<RecordId> keyForOptime(const Timestamp& opTime);
StatusWith<RecordId> keyForDoc(const BSONObj& doc);
RecordId keyForElem(const BSONElement& elem);
RecordId keyForOID(OID oid);
+RecordId keyForDate(Date_t date);
/**
* data and len must be the arguments from RecordStore::insert() on an oplog collection.
diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp
index eb69641f957..e9e6321d0be 100644
--- a/src/mongo/db/storage/key_string.cpp
+++ b/src/mongo/db/storage/key_string.cpp
@@ -415,6 +415,13 @@ void BuilderBase<BufferT>::appendOID(OID oid) {
}
template <class BufferT>
+void BuilderBase<BufferT>::appendDate(Date_t date) {
+ _verifyAppendingState();
+ _appendDate(date, _shouldInvertOnAppend());
+ _elemCount++;
+}
+
+template <class BufferT>
void BuilderBase<BufferT>::appendDiscriminator(const Discriminator discriminator) {
// The discriminator forces this KeyString to compare Less/Greater than any KeyString with
// the same prefix of keys. As an example, this can be used to land on the first key in the
diff --git a/src/mongo/db/storage/key_string.h b/src/mongo/db/storage/key_string.h
index 87a6b74c742..36da68fc3e4 100644
--- a/src/mongo/db/storage/key_string.h
+++ b/src/mongo/db/storage/key_string.h
@@ -551,6 +551,7 @@ public:
void appendBinData(const BSONBinData& data);
void appendSetAsArray(const BSONElementSet& set, const StringTransformFn& f = nullptr);
void appendOID(OID oid);
+ void appendDate(Date_t date);
/**
* Appends a Discriminator byte and kEnd byte to a key string.
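
appendDate() lets a Date_t participate in the same memcmp-ordered byte format as the other appendX() builders: once encoded, byte-wise comparison of the buffers agrees with chronological comparison of the dates, which is what makes bounds like keyForDate(Date_t::min()) and keyForDate(Date_t::max()) meaningful. The sketch below shows the classic order-preserving trick for a signed 64-bit millisecond count (flip the sign bit, store big-endian); it is a simplified model only, and encodeMillis is a hypothetical helper, not KeyString's actual Date layout.

#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>

// Encode a signed 64-bit millisecond count so that memcmp() on the bytes matches numeric order:
// flipping the sign bit maps the signed range onto the unsigned range monotonically, and writing
// the result big-endian makes byte order agree with numeric order.
std::array<unsigned char, 8> encodeMillis(int64_t millis) {
    uint64_t bits = static_cast<uint64_t>(millis) ^ (uint64_t{1} << 63);
    std::array<unsigned char, 8> out{};
    for (int i = 7; i >= 0; --i) {
        out[i] = static_cast<unsigned char>(bits & 0xff);
        bits >>= 8;
    }
    return out;
}

int main() {
    const auto earlier = encodeMillis(-1000);  // a date before the Unix epoch
    const auto later = encodeMillis(2000);     // a date after the Unix epoch
    assert(std::memcmp(earlier.data(), later.data(), earlier.size()) < 0);  // bytes sort like dates
    return 0;
}
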
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 487a16f4445..4329eebf9ac 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -31,12 +31,15 @@
* This file tests db/exec/collection_scan.cpp.
*/
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
+
#include "mongo/platform/basic.h"
#include <fmt/printf.h>
#include <memory>
#include "mongo/client/dbclient_cursor.h"
+#include "mongo/db/catalog/clustered_collection_options_gen.h"
#include "mongo/db/catalog/clustered_collection_util.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
@@ -53,6 +56,7 @@
#include "mongo/db/record_id_helpers.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/dbtests/dbtests.h"
+#include "mongo/logv2/log.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/fail_point.h"
@@ -173,7 +177,8 @@ public:
NamespaceString _nss;
};
- ScopedCollectionDeleter makeCollectionClustered(const NamespaceString& ns) {
+ ScopedCollectionDeleter createClusteredCollection(const NamespaceString& ns,
+ bool prePopulate = true) {
AutoGetCollection autoColl(&_opCtx, ns, MODE_IX);
{
@@ -188,14 +193,60 @@ public:
wuow.commit();
}
-
- for (int i = 0; i < numObj(); ++i) {
- _client.insert(ns.ns(), BSON("foo" << i));
+ if (prePopulate) {
+ for (int i = 0; i < numObj(); ++i) {
+ _client.insert(ns.ns(), BSON("foo" << i));
+ }
}
return {&_opCtx, ns};
}
+ void insertDocument(const NamespaceString& ns, const BSONObj& doc) {
+ _client.insert(ns.ns(), doc);
+ }
+
+ // Performs a bounded collection scan from 'minRecord' to 'maxRecord' in the specified
+ // 'direction'. Asserts that the collection scan retrieves 'expectedNumMatches' documents.
+ void runClusteredCollScan(const NamespaceString& ns,
+ CollectionScanParams::Direction direction,
+ RecordId minRecord,
+ RecordId maxRecord,
+ int expectedNumMatches) {
+ AutoGetCollectionForRead autoColl(&_opCtx, ns);
+
+ const CollectionPtr& coll = autoColl.getCollection();
+ ASSERT(coll->isClustered());
+
+ // Configure the scan.
+ CollectionScanParams params;
+ params.tailable = false;
+ params.direction = direction;
+ params.minRecord = minRecord;
+ params.maxRecord = maxRecord;
+
+ WorkingSet ws;
+ auto scan = std::make_unique<CollectionScan>(_expCtx.get(), coll, params, &ws, nullptr);
+
+ int count = 0;
+ while (!scan->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = scan->work(&id);
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = ws.get(id);
+ ASSERT(member->hasRecordId());
+ ASSERT(member->hasObj());
+
+ ASSERT_GTE(member->recordId, minRecord);
+ ASSERT_LTE(member->recordId, maxRecord);
+
+ count++;
+ }
+ }
+
+ ASSERT_EQ(count, expectedNumMatches);
+ }
+
static int numObj() {
return 50;
}
@@ -494,7 +545,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekFai
TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMax) {
auto ns = NamespaceString("a.b");
- auto collDeleter = makeCollectionClustered(ns);
+ auto collDeleter = createClusteredCollection(ns);
AutoGetCollectionForRead autoColl(&_opCtx, ns);
const CollectionPtr& coll = autoColl.getCollection();
@@ -534,9 +585,171 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMax) {
ASSERT_EQ(count, recordIds.size());
}
+// Tests that a collection scan with bounds generated from type 'date', over a collection whose
+// entries are all generated from type 'objectId', excludes all entries.
+TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxBoundsDateTypeNoMatches) {
+ const std::vector<CollectionScanParams::Direction> collScanDirections{
+ CollectionScanParams::FORWARD, CollectionScanParams::BACKWARD};
+
+ for (const auto direction : collScanDirections) {
+ LOGV2(6028700,
+ "Running clustered collection scan test case",
+ "scanDirection"_attr =
+ (direction == CollectionScanParams::FORWARD ? "FORWARD" : "BACKWARD"));
+
+ auto ns = NamespaceString("a.b");
+
+ // Create a clustered collection pre-populated with RecordIds generated from type
+ // 'objectId'.
+ auto scopedCollectionDeleter = createClusteredCollection(ns);
+
+ // Use bounds that restrict the scan to RecordIds generated from type 'date'.
+ auto minRecord = record_id_helpers::keyForDate(Date_t::min());
+ auto maxRecord = record_id_helpers::keyForDate(Date_t::max());
+
+ // The collection has no records generated with type 'date'. There should be 0 matches.
+ runClusteredCollScan(ns, direction, minRecord, maxRecord, 0);
+ }
+}
+
+// Tests that if the bounds are generated from type 'date', only RecordIds generated with type
+// 'date' are included in the results.
+TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxDateTypeMatches) {
+ const std::vector<CollectionScanParams::Direction> collScanDirections{
+ CollectionScanParams::FORWARD, CollectionScanParams::BACKWARD};
+
+ for (const auto direction : collScanDirections) {
+ LOGV2(6028701,
+ "Running clustered collection scan test case",
+ "scanDirection"_attr =
+ (direction == CollectionScanParams::FORWARD ? "FORWARD" : "BACKWARD"));
+
+ auto ns = NamespaceString("a.b");
+
+ // Create a clustered collection pre-populated with RecordIds generated from type
+ // 'objectId'.
+ auto scopedCollectionDeleter = createClusteredCollection(ns);
+
+ auto numDateDocs = 5;
+
+ // Insert documents that generate a RecordId with type 'date'.
+ Date_t now = Date_t::now();
+ for (int i = 0; i < numDateDocs; i++) {
+ insertDocument(ns, BSON("_id" << now - Milliseconds(i)));
+ }
+
+ // Generate bounds from type 'date'.
+ auto minRecord = record_id_helpers::keyForDate(Date_t::min());
+ auto maxRecord = record_id_helpers::keyForDate(Date_t::max());
+
+ // The collection contains RecordIds generated from both type 'objectId' and 'date'. Only
+ // RecordIds that match the bound type should be included in the scan.
+ runClusteredCollScan(ns, direction, minRecord, maxRecord, numDateDocs);
+ }
+}
+
+TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredIgnoreNumericRecordIds) {
+ const std::vector<CollectionScanParams::Direction> collScanDirections{
+ CollectionScanParams::FORWARD, CollectionScanParams::BACKWARD};
+
+ for (const auto direction : collScanDirections) {
+ LOGV2(6028702,
+ "Running clustered collection scan test case",
+ "scanDirection"_attr =
+ (direction == CollectionScanParams::FORWARD ? "FORWARD" : "BACKWARD"));
+ auto ns = NamespaceString("a.b");
+ auto scopedCollectionDeleter = createClusteredCollection(ns, false /* prePopulate */);
+
+ int numOIDDocs = 20;
+ // Insert documents whose default '_id' values of type 'objectId' are used to generate their
+ // RecordIds.
+ for (int i = 0; i < numOIDDocs; i++) {
+ insertDocument(ns, BSON("foo" << i));
+ }
+
+ // Insert documents that generate 'numeric' typed RecordIds.
+ auto numNumericDocs = 10;
+ for (int i = 0; i < numNumericDocs; i++) {
+ insertDocument(ns, BSON("_id" << i));
+ }
+
+ // Use bounds that will include every 'objectId' typed record.
+ auto minRecord = record_id_helpers::keyForOID(OID());
+ auto maxRecord = record_id_helpers::keyForOID(OID::max());
+
+ // Only records generated from type 'objectId' should be returned by the scan.
+ runClusteredCollScan(ns, direction, minRecord, maxRecord, numOIDDocs);
+ }
+}
+
+// Tests that exclusive filters work with 'date' typed collection scan bounds.
+TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxDateExclusive) {
+ const std::vector<CollectionScanParams::Direction> collScanDirections{
+ CollectionScanParams::FORWARD, CollectionScanParams::BACKWARD};
+
+ for (const auto direction : collScanDirections) {
+ LOGV2(6028703,
+ "Running clustered collection scan test case",
+ "scanDirection"_attr =
+ (direction == CollectionScanParams::FORWARD ? "FORWARD" : "BACKWARD"));
+
+ auto ns = NamespaceString("a.b");
+
+ auto scopedCollectionDeleter = createClusteredCollection(ns, false /* prePopulate */);
+
+ AutoGetCollectionForRead autoColl(&_opCtx, ns);
+ const CollectionPtr& coll = autoColl.getCollection();
+
+ Date_t maxDate = Date_t::now();
+ Date_t middleDate = maxDate - Milliseconds(1);
+ Date_t minDate = middleDate - Milliseconds(1);
+ std::vector<BSONObj> dateDocuments = {
+ BSON("_id" << minDate), BSON("_id" << middleDate), BSON("_id" << maxDate)};
+ for (const auto& doc : dateDocuments) {
+ insertDocument(ns, doc);
+ }
+
+ CollectionScanParams params;
+ params.tailable = false;
+ params.direction = direction;
+
+ params.minRecord = record_id_helpers::keyForDate(minDate);
+ params.maxRecord = record_id_helpers::keyForDate(maxDate);
+
+ // Exclude all but the record with _id 'middleDate' from the scan.
+ StatusWithMatchExpression swMatch = MatchExpressionParser::parse(
+ BSON("_id" << BSON("$gt" << minDate << "$lt" << maxDate)), _expCtx.get());
+
+ ASSERT_OK(swMatch.getStatus());
+ auto filter = std::move(swMatch.getValue());
+
+ WorkingSet ws;
+ auto scan =
+ std::make_unique<CollectionScan>(_expCtx.get(), coll, params, &ws, filter.get());
+
+ int count = 0;
+ while (!scan->isEOF()) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = scan->work(&id);
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = ws.get(id);
+ ASSERT(member->hasRecordId());
+ ASSERT(member->hasObj());
+
+ ASSERT_NOT_EQUALS(member->recordId, record_id_helpers::keyForDate(maxDate));
+ ASSERT_NOT_EQUALS(member->recordId, record_id_helpers::keyForDate(minDate));
+ count++;
+ }
+ }
+
+ // Verify the min and max bounds are excluded.
+ ASSERT_EQ(count, dateDocuments.size() - 2);
+ }
+}
+
TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredReverse) {
auto ns = NamespaceString("a.b");
- auto collDeleter = makeCollectionClustered(ns);
+ auto collDeleter = createClusteredCollection(ns);
AutoGetCollectionForRead autoColl(&_opCtx, ns);
const CollectionPtr& coll = autoColl.getCollection();
@@ -578,9 +791,9 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredReverse) {
ASSERT_EQ(count, recordIds.size());
}
-TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredNonExistentRecordIds) {
+TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxFullObjectIdRange) {
auto ns = NamespaceString("a.b");
- auto collDeleter = makeCollectionClustered(ns);
+ auto collDeleter = createClusteredCollection(ns);
AutoGetCollectionForRead autoColl(&_opCtx, ns);
const CollectionPtr& coll = autoColl.getCollection();
@@ -596,7 +809,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredNonExistentRecord
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
- // Use RecordIds that don't exist. Expect to see all records.
+ // Expect to see all records.
params.minRecord = record_id_helpers::keyForOID(OID());
params.maxRecord = record_id_helpers::keyForOID(OID::max());
@@ -624,7 +837,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredNonExistentRecord
TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRange) {
auto ns = NamespaceString("a.b");
- auto collDeleter = makeCollectionClustered(ns);
+ auto collDeleter = createClusteredCollection(ns);
AutoGetCollectionForRead autoColl(&_opCtx, ns);
const CollectionPtr& coll = autoColl.getCollection();
@@ -673,7 +886,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRange) {
TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRangeExclusive) {
auto ns = NamespaceString("a.b");
- auto collDeleter = makeCollectionClustered(ns);
+ auto collDeleter = createClusteredCollection(ns);
AutoGetCollectionForRead autoColl(&_opCtx, ns);
const CollectionPtr& coll = autoColl.getCollection();
@@ -733,7 +946,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRangeExclusi
TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRangeExclusiveReverse) {
auto ns = NamespaceString("a.b");
- auto collDeleter = makeCollectionClustered(ns);
+ auto collDeleter = createClusteredCollection(ns);
AutoGetCollectionForRead autoColl(&_opCtx, ns);
const CollectionPtr& coll = autoColl.getCollection();