summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLouis Williams <louis.williams@mongodb.com>2021-03-18 10:45:30 -0400
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-03-18 15:53:49 +0000
commit926ee400907f538c8eb240d97cb292797cd60571 (patch)
treef42275a316a114643801347b8666d6a764d9c624
parentce5cb5c681dbf954d229367f543c0b89a4cbc6d7 (diff)
downloadmongo-926ee400907f538c8eb240d97cb292797cd60571.tar.gz
SERVER-52528 Test queries on sharded time-series collections
-rw-r--r--jstests/noPassthroughWithMongod/timeseries_create.js2
-rw-r--r--jstests/sharding/timeseries_sharded_query.js192
-rw-r--r--src/mongo/db/catalog/database_impl.cpp3
-rw-r--r--src/mongo/db/index/index_access_method.cpp19
-rw-r--r--src/mongo/db/ops/insert.cpp2
-rw-r--r--src/mongo/db/s/config/configsvr_shard_collection_command.cpp2
-rw-r--r--src/mongo/db/s/create_collection_coordinator.cpp2
-rw-r--r--src/mongo/db/s/shardsvr_create_collection_command.cpp2
8 files changed, 208 insertions, 16 deletions
diff --git a/jstests/noPassthroughWithMongod/timeseries_create.js b/jstests/noPassthroughWithMongod/timeseries_create.js
index 6ff50d9df40..bfa413c0d05 100644
--- a/jstests/noPassthroughWithMongod/timeseries_create.js
+++ b/jstests/noPassthroughWithMongod/timeseries_create.js
@@ -57,8 +57,6 @@ const testOptions = function(allowed,
bucketsColl);
}
- assert.commandFailedWithCode(testDB.runCommand({drop: bucketsCollName}),
- ErrorCodes.IllegalOperation);
assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}));
} else {
assert.commandFailedWithCode(res, errorCode);
diff --git a/jstests/sharding/timeseries_sharded_query.js b/jstests/sharding/timeseries_sharded_query.js
new file mode 100644
index 00000000000..3867cfbb2f7
--- /dev/null
+++ b/jstests/sharding/timeseries_sharded_query.js
@@ -0,0 +1,192 @@
+/**
+ * Tests that time-series collections can be sharded and that queries return correct results.
+ *
+ * @tags: [
+ * requires_fcv_49,
+ * requires_find_command,
+ * ]
+ */
+
+(function() {
+load("jstests/core/timeseries/libs/timeseries.js");
+load("jstests/sharding/libs/find_chunks_util.js");
+
+Random.setRandomSeed();
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+
+const dbName = 'test';
+const sDB = st.s.getDB(dbName);
+const configDB = st.s0.getDB('config');
+
+if (!TimeseriesTest.timeseriesCollectionsEnabled(st.shard0)) {
+ jsTestLog("Skipping test because the time-series collection feature flag is disabled");
+ st.stop();
+ return;
+}
+
+// Simple shard key on the metadata field.
+(function metaShardKey() {
+ assert.commandWorked(
+ sDB.createCollection('ts', {timeseries: {timeField: 'time', metaField: 'hostId'}}));
+
+ // Insert directly on the primary shard because mongos does not know how to insert into a TS
+ // collection.
+ st.ensurePrimaryShard(dbName, st.shard0.shardName);
+ const tsColl = st.shard0.getDB(dbName).ts;
+ const numDocs = 20;
+ let docs = [];
+ for (let i = 0; i < numDocs; i++) {
+ const doc = {
+ time: ISODate(),
+ hostId: i,
+ _id: i,
+ data: Random.rand(),
+ };
+ docs.push(doc);
+ assert.commandWorked(tsColl.insert(doc));
+ }
+
+ // This index gets created as {meta: 1} on the buckets collection.
+ assert.commandWorked(tsColl.createIndex({hostId: 1}));
+
+ st.shardColl('system.buckets.ts',
+ {meta: 1} /* Shard key */,
+ {meta: 10} /* Split at */,
+ {meta: 10} /* Move the chunk containing {meta: 10} to its own shard */,
+ dbName, /* dbName */
+ true /* Wait until documents orphaned by the move get deleted */);
+
+ let counts = st.chunkCounts('system.buckets.ts', 'test');
+ assert.eq(1, counts[st.shard0.shardName]);
+ assert.eq(1, counts[st.shard1.shardName]);
+
+ // Query with shard key
+ assert.docEq([docs[0]], sDB.ts.find({hostId: 0}).toArray());
+ assert.docEq([docs[numDocs - 1]], sDB.ts.find({hostId: (numDocs - 1)}).toArray());
+
+ // Query without shard key
+ assert.docEq(docs, sDB.ts.find().sort({time: 1}).toArray());
+
+ assert.commandWorked(sDB.dropDatabase());
+})();
+
+// Create a time-series collection with a non-default collation, but an index with the simple
+// collation, which makes it eligible as a shard key.
+(function metaShardKeyCollation() {
+ assert.commandWorked(sDB.createCollection('ts', {
+ timeseries: {timeField: 'time', metaField: 'hostName'},
+ collation: {locale: 'en', strength: 1, numericOrdering: true}
+ }));
+
+ // Insert directly on the primary shard because mongos does not know how to insert into a TS
+ // collection.
+ st.ensurePrimaryShard(dbName, st.shard0.shardName);
+ const tsColl = st.shard0.getDB(dbName).ts;
+
+ const numDocs = 20;
+ let docs = [];
+ for (let i = 0; i < numDocs; i++) {
+ const doc = {
+ time: ISODate(),
+ hostName: 'host_' + i,
+ _id: i,
+ data: Random.rand(),
+ };
+ docs.push(doc);
+ assert.commandWorked(tsColl.insert(doc));
+ }
+
+ // This index gets created as {meta: 1} on the buckets collection.
+ assert.commandWorked(tsColl.createIndex({hostName: 1}, {collation: {locale: 'simple'}}));
+
+ st.shardColl('system.buckets.ts',
+ {meta: 1} /* Shard key */,
+ {meta: 'host_10'} /* Split at */,
+                 {meta: 'host_10'} /* Move the chunk containing {meta: 'host_10'} to its own shard */,
+ dbName, /* dbName */
+ true /* Wait until documents orphaned by the move get deleted */);
+
+ let counts = st.chunkCounts('system.buckets.ts', 'test');
+ assert.eq(1, counts[st.shard0.shardName]);
+ assert.eq(1, counts[st.shard1.shardName]);
+
+ // Query with shard key
+ assert.docEq([docs[0]], sDB.ts.find({hostName: 'host_0'}).toArray());
+ assert.docEq([docs[numDocs - 1]], sDB.ts.find({hostName: 'host_' + (numDocs - 1)}).toArray());
+
+ // Query without shard key
+ assert.docEq(docs, sDB.ts.find().sort({time: 1}).toArray());
+ assert.commandWorked(sDB.dropDatabase());
+})();
+
+// Create a time-series collection with a shard key compounded with a metadata subfield and time.
+(function compoundShardKey() {
+ assert.commandWorked(
+ sDB.createCollection('ts', {timeseries: {timeField: 'time', metaField: 'meta'}}));
+
+ // Insert directly on the primary shard because mongos does not know how to insert into a TS
+ // collection.
+ st.ensurePrimaryShard(dbName, st.shard0.shardName);
+
+ const tsColl = st.shard0.getDB(dbName).ts;
+ const numDocs = 20;
+ let docs = [];
+ for (let i = 0; i < numDocs; i++) {
+ const doc = {
+ time: ISODate(),
+ meta: {id: i},
+ _id: i,
+ data: Random.rand(),
+ };
+ docs.push(doc);
+ assert.commandWorked(tsColl.insert(doc));
+ }
+
+    // This index gets created as {meta.id: 'hashed', control.min.time: 1, control.max.time: 1} on
+    // the buckets collection.
+ assert.commandWorked(tsColl.createIndex({'meta.id': 'hashed', time: 1}));
+
+ assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+ assert.commandWorked(st.s.adminCommand({
+ shardCollection: 'test.system.buckets.ts',
+ key: {'meta.id': 'hashed', 'control.min.time': 1, 'control.max.time': 1}
+ }));
+
+ let counts = st.chunkCounts('system.buckets.ts', 'test');
+ assert.eq(1, counts[st.shard0.shardName], counts);
+ assert.eq(0, counts[st.shard1.shardName], counts);
+
+ // Split the chunk based on 'bounds' and verify total chunks increased by one.
+ const lowestChunk = findChunksUtil.findChunksByNs(configDB, 'test.system.buckets.ts')
+ .sort({min: 1})
+ .limit(1)
+ .next();
+ assert(lowestChunk);
+
+ assert.commandWorked(st.s.adminCommand(
+ {split: 'test.system.buckets.ts', bounds: [lowestChunk.min, lowestChunk.max]}));
+
+ let otherShard = st.getOther(st.getPrimaryShard(dbName)).name;
+ assert.commandWorked(st.s.adminCommand({
+ movechunk: 'test.system.buckets.ts',
+ find: {'meta.id': 10, 'control.min.time': 0, 'control.max.time': 0},
+ to: otherShard,
+ _waitForDelete: true
+ }));
+
+ counts = st.chunkCounts('system.buckets.ts', 'test');
+ assert.eq(1, counts[st.shard0.shardName], counts);
+ assert.eq(1, counts[st.shard1.shardName], counts);
+
+ // Query with shard key
+ assert.docEq([docs[0]], sDB.ts.find({'meta.id': 0}).toArray());
+ assert.docEq([docs[numDocs - 1]], sDB.ts.find({'meta.id': (numDocs - 1)}).toArray());
+
+ // Query without shard key
+ assert.docEq(docs, sDB.ts.find().sort({time: 1}).toArray());
+ assert.commandWorked(sDB.dropDatabase());
+})();
+
+st.stop();
+})();
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 9e4ee7c16ee..dc3a991bfac 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -351,7 +351,8 @@ Status DatabaseImpl::dropCollection(OperationContext* opCtx,
} else if (!(nss.isSystemDotViews() || nss.isHealthlog() ||
nss == NamespaceString::kLogicalSessionsNamespace ||
nss == NamespaceString::kKeysCollectionNamespace ||
- nss.isTemporaryReshardingCollection())) {
+ nss.isTemporaryReshardingCollection() ||
+ nss.isTimeseriesBucketsCollection())) {
return Status(ErrorCodes::IllegalOperation,
str::stream() << "can't drop system collection " << nss);
}
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 486a6740012..004fb931f75 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -728,16 +728,17 @@ Status AbstractIndexAccessMethod::commitBulk(OperationContext* opCtx,
// Assert that keys are retrieved from the sorter in non-decreasing order, but only in debug
// builds since this check can be expensive.
int cmpData;
- if (kDebugBuild || _descriptor->unique()) {
+ if (_descriptor->unique()) {
cmpData = data.first.compareWithoutRecordId(previousKey);
- if (cmpData < 0) {
- LOGV2_FATAL_NOTRACE(
- 31171,
- "Expected the next key to be greater than or equal to the previous key",
- "nextKey"_attr = data.first.toString(),
- "previousKey"_attr = previousKey.toString(),
- "index"_attr = _descriptor->indexName());
- }
+ }
+
+ if (kDebugBuild && data.first.compare(previousKey) < 0) {
+ LOGV2_FATAL_NOTRACE(
+ 31171,
+ "Expected the next key to be greater than or equal to the previous key",
+ "nextKey"_attr = data.first.toString(),
+ "previousKey"_attr = previousKey.toString(),
+ "index"_attr = _descriptor->indexName());
}
// Before attempting to insert, perform a duplicate key check.
diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp
index ad8002a7ee2..2903bdcc6e5 100644
--- a/src/mongo/db/ops/insert.cpp
+++ b/src/mongo/db/ops/insert.cpp
@@ -229,7 +229,7 @@ Status userAllowedCreateNS(const NamespaceString& ns) {
// 'config.system.sessions', there will be a corresponding persisted chunk metadata
// collection 'config.cache.chunks.config.system.sessions'. We wish to allow writes to this
// collection.
- if (ns.coll().find(".system.sessions") != std::string::npos) {
+ if (ns.isConfigDotCacheDotChunks()) {
return Status::OK();
}
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index 40344c84aa2..613aa25d6d0 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -84,7 +84,7 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
"can't shard system namespaces",
!nss.isSystem() || nss == NamespaceString::kLogicalSessionsNamespace ||
- nss.isTemporaryReshardingCollection());
+ nss.isTemporaryReshardingCollection() || nss.isTimeseriesBucketsCollection());
// Ensure numInitialChunks is within valid bounds.
// Cannot have more than 8192 initial chunks per shard. Setting a maximum of 1,000,000
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index f565463572d..705b6646b48 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -397,7 +397,7 @@ void CreateCollectionCoordinator::_checkCommandArguments(OperationContext* opCtx
uassert(ErrorCodes::IllegalOperation,
"can't shard system namespaces",
!_nss.isSystem() || _nss == NamespaceString::kLogicalSessionsNamespace ||
- _nss.isTemporaryReshardingCollection());
+ _nss.isTemporaryReshardingCollection() || _nss.isTimeseriesBucketsCollection());
if (_request.getNumInitialChunks()) {
// Ensure numInitialChunks is within valid bounds.
diff --git a/src/mongo/db/s/shardsvr_create_collection_command.cpp b/src/mongo/db/s/shardsvr_create_collection_command.cpp
index ba3285d398f..e1179cd0421 100644
--- a/src/mongo/db/s/shardsvr_create_collection_command.cpp
+++ b/src/mongo/db/s/shardsvr_create_collection_command.cpp
@@ -119,7 +119,7 @@ CreateCollectionResponse createCollectionLegacy(OperationContext* opCtx,
uassert(ErrorCodes::IllegalOperation,
"can't shard system namespaces",
!nss.isSystem() || nss == NamespaceString::kLogicalSessionsNamespace ||
- nss.isTemporaryReshardingCollection());
+ nss.isTemporaryReshardingCollection() || nss.isTimeseriesBucketsCollection());
auto optNumInitialChunks = request.getNumInitialChunks();
if (optNumInitialChunks) {