summaryrefslogtreecommitdiff
path: root/src/mongo/db
diff options
context:
space:
mode:
authorBrett Nawrocki <brett.nawrocki@mongodb.com>2022-06-27 19:26:58 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-06-29 16:54:43 +0000
commitc007112aaebdf3e1403ee84aa356e07fa084b059 (patch)
treed43a92c78d0a337976bd29845603aa0597162b7a /src/mongo/db
parentadd41e1ac8bf0dbc3be680bdc3e7d49266239e35 (diff)
downloadmongo-c007112aaebdf3e1403ee84aa356e07fa084b059.tar.gz
SERVER-58983 Reanimate ReshardingCollectionClonerTest
Diffstat (limited to 'src/mongo/db')
-rw-r--r--src/mongo/db/s/SConscript1
-rw-r--r--src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp329
-rw-r--r--src/mongo/db/s/shard_server_test_fixture.cpp15
-rw-r--r--src/mongo/db/s/shard_server_test_fixture.h12
-rw-r--r--src/mongo/db/s/sharding_mongod_test_fixture.cpp10
-rw-r--r--src/mongo/db/s/sharding_mongod_test_fixture.h10
6 files changed, 373 insertions, 4 deletions
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 0e9631f111b..c8fbd314d87 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -570,6 +570,7 @@ env.CppUnitTest(
'persistent_task_queue_test.cpp',
'range_deletion_util_test.cpp',
'resharding/resharding_agg_test.cpp',
+ 'resharding/resharding_collection_cloner_test.cpp',
'resharding/resharding_collection_test.cpp',
'resharding/resharding_data_replication_test.cpp',
'resharding/resharding_destined_recipient_test.cpp',
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp
new file mode 100644
index 00000000000..93557d8b6eb
--- /dev/null
+++ b/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp
@@ -0,0 +1,329 @@
+/**
+ * Copyright (C) 2020-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include <vector>
+
+#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/json.h"
+#include "mongo/db/exec/document_value/document_value_test_util.h"
+#include "mongo/db/hasher.h"
+#include "mongo/db/pipeline/document_source_mock.h"
+#include "mongo/db/s/resharding/resharding_collection_cloner.h"
+#include "mongo/db/s/resharding/resharding_metrics.h"
+#include "mongo/db/s/resharding/resharding_util.h"
+#include "mongo/db/s/shard_server_test_fixture.h"
+#include "mongo/db/service_context_test_fixture.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace {
+
+using Doc = Document;
+using Arr = std::vector<Value>;
+using V = Value;
+
+/**
+ * Mock interface to allow specifying mock results for the 'from' collection of the $lookup stage.
+ */
+class MockMongoInterface final : public StubMongoProcessInterface {
+public:
+ MockMongoInterface(std::deque<DocumentSource::GetNextResult> mockResults)
+ : _mockResults(std::move(mockResults)) {}
+
+ std::unique_ptr<Pipeline, PipelineDeleter> attachCursorSourceToPipeline(
+ Pipeline* ownedPipeline,
+ ShardTargetingPolicy shardTargetingPolicy = ShardTargetingPolicy::kAllowed,
+ boost::optional<BSONObj> readConcern = boost::none) final {
+ std::unique_ptr<Pipeline, PipelineDeleter> pipeline(
+ ownedPipeline, PipelineDeleter(ownedPipeline->getContext()->opCtx));
+
+ pipeline->addInitialSource(
+ DocumentSourceMock::createForTest(_mockResults, pipeline->getContext()));
+ return pipeline;
+ }
+
+private:
+ std::deque<DocumentSource::GetNextResult> _mockResults;
+};
+
+class ReshardingCollectionClonerTest : public ShardServerTestFixtureWithCatalogCacheMock {
+protected:
+ std::unique_ptr<Pipeline, PipelineDeleter> makePipeline(
+ ShardKeyPattern newShardKeyPattern,
+ ShardId recipientShard,
+ std::deque<DocumentSource::GetNextResult> sourceCollectionData,
+ std::deque<DocumentSource::GetNextResult> configCacheChunksData) {
+ auto tempNss = resharding::constructTemporaryReshardingNss(_sourceNss.db(), _sourceUUID);
+
+ _metrics = ReshardingMetrics::makeInstance(_sourceUUID,
+ newShardKeyPattern.toBSON(),
+ _sourceNss,
+ ReshardingMetrics::Role::kRecipient,
+ getServiceContext()->getFastClockSource()->now(),
+ getServiceContext());
+
+ ReshardingCollectionCloner cloner(_metrics.get(),
+ std::move(newShardKeyPattern),
+ _sourceNss,
+ _sourceUUID,
+ std::move(recipientShard),
+ Timestamp(1, 0), /* dummy value */
+ std::move(tempNss));
+
+ auto pipeline = cloner.makePipeline(
+ operationContext(),
+ std::make_shared<MockMongoInterface>(std::move(configCacheChunksData)));
+
+ pipeline->addInitialSource(DocumentSourceMock::createForTest(
+ std::move(sourceCollectionData), pipeline->getContext()));
+
+ return pipeline;
+ }
+
+ template <class T>
+ auto getHashedElementValue(T value) {
+ return BSONElementHasher::hash64(BSON("" << value).firstElement(),
+ BSONElementHasher::DEFAULT_HASH_SEED);
+ }
+
+ void setUp() override {
+ ShardServerTestFixtureWithCatalogCacheMock::setUp();
+ }
+
+ void tearDown() override {
+ ShardServerTestFixtureWithCatalogCacheMock::tearDown();
+ }
+
+ ChunkManager createChunkManager(
+ const ShardKeyPattern& shardKeyPattern,
+ std::deque<DocumentSource::GetNextResult> configCacheChunksData) {
+ const OID epoch = OID::gen();
+ std::vector<ChunkType> chunks;
+ for (const auto& chunkData : configCacheChunksData) {
+ const auto bson = chunkData.getDocument().toBson();
+ ChunkRange range{bson.getField("_id").Obj().getOwned(),
+ bson.getField("max").Obj().getOwned()};
+ ShardId shard{bson.getField("shard").valueStringDataSafe().toString()};
+ chunks.emplace_back(_sourceUUID,
+ std::move(range),
+ ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
+ std::move(shard));
+ }
+
+ auto rt = RoutingTableHistory::makeNew(_sourceNss,
+ _sourceUUID,
+ shardKeyPattern.getKeyPattern(),
+ nullptr,
+ false,
+ epoch,
+ Timestamp(1, 1),
+ boost::none /* timeseriesFields */,
+ boost::none,
+ boost::none /* chunkSizeBytes */,
+ false,
+ chunks);
+
+ return ChunkManager(_sourceId.getShardId(),
+ _sourceDbVersion,
+ makeStandaloneRoutingTableHistory(std::move(rt)),
+ boost::none);
+ }
+
+private:
+ const NamespaceString _sourceNss = NamespaceString("test"_sd, "collection_being_resharded"_sd);
+ const UUID _sourceUUID = UUID::gen();
+ const ReshardingSourceId _sourceId{UUID::gen(), _myShardName};
+ const DatabaseVersion _sourceDbVersion{UUID::gen(), Timestamp(1, 1)};
+
+ std::unique_ptr<ReshardingMetrics> _metrics;
+};
+
+TEST_F(ReshardingCollectionClonerTest, MinKeyChunk) {
+ ShardKeyPattern sk{fromjson("{x: 1}")};
+ std::deque<DocumentSource::GetNextResult> configData{
+ Doc(fromjson("{_id: {x: {$minKey: 1}}, max: {x: 0.0}, shard: 'myShardName'}")),
+ Doc(fromjson("{_id: {x: 0.0}, max: {x: {$maxKey: 1}}, shard: 'shard2' }"))};
+ getCatalogCacheMock()->setChunkManagerReturnValue(createChunkManager(sk, configData));
+ auto pipeline = makePipeline(std::move(sk),
+ _myShardName,
+ {Doc(fromjson("{_id: 1, x: {$minKey: 1}}")),
+ Doc(fromjson("{_id: 2, x: -0.001}")),
+ Doc(fromjson("{_id: 3, x: NumberLong(0)}")),
+ Doc(fromjson("{_id: 4, x: 0.0}")),
+ Doc(fromjson("{_id: 5, x: 0.001}")),
+ Doc(fromjson("{_id: 6, x: {$maxKey: 1}}"))},
+ std::move(configData));
+
+ auto next = pipeline->getNext();
+ ASSERT(next);
+ ASSERT_BSONOBJ_BINARY_EQ(BSON("_id" << 1 << "x" << MINKEY << "$sortKey" << BSON_ARRAY(1)),
+ next->toBson());
+
+ next = pipeline->getNext();
+ ASSERT(next);
+ ASSERT_BSONOBJ_BINARY_EQ(BSON("_id" << 2 << "x" << -0.001 << "$sortKey" << BSON_ARRAY(2)),
+ next->toBson());
+
+ ASSERT_FALSE(pipeline->getNext());
+}
+
+TEST_F(ReshardingCollectionClonerTest, MaxKeyChunk) {
+ ShardKeyPattern sk{fromjson("{x: 1}")};
+ std::deque<DocumentSource::GetNextResult> configData{
+ Doc(fromjson("{_id: {x: {$minKey: 1}}, max: {x: 0.0}, shard: 'myShardName'}")),
+ Doc(fromjson("{_id: {x: 0.0}, max: {x: {$maxKey: 1}}, shard: 'shard2' }")),
+ };
+ getCatalogCacheMock()->setChunkManagerReturnValue(createChunkManager(sk, configData));
+ auto pipeline = makePipeline(std::move(sk),
+ ShardId("shard2"),
+ {Doc(fromjson("{_id: 1, x: {$minKey: 1}}")),
+ Doc(fromjson("{_id: 2, x: -0.001}")),
+ Doc(fromjson("{_id: 3, x: NumberLong(0)}")),
+ Doc(fromjson("{_id: 4, x: 0.0}")),
+ Doc(fromjson("{_id: 5, x: 0.001}")),
+                                  Doc(fromjson("{_id: 6, x: {$maxKey: 1}}"))},
+ std::move(configData));
+
+ auto next = pipeline->getNext();
+ ASSERT(next);
+ ASSERT_BSONOBJ_BINARY_EQ(BSON("_id" << 3 << "x" << 0LL << "$sortKey" << BSON_ARRAY(3)),
+ next->toBson());
+
+ next = pipeline->getNext();
+ ASSERT(next);
+ ASSERT_BSONOBJ_BINARY_EQ(BSON("_id" << 4 << "x" << 0.0 << "$sortKey" << BSON_ARRAY(4)),
+ next->toBson());
+
+ next = pipeline->getNext();
+ ASSERT(next);
+ ASSERT_BSONOBJ_BINARY_EQ(BSON("_id" << 5 << "x" << 0.001 << "$sortKey" << BSON_ARRAY(5)),
+ next->toBson());
+
+ // TODO SERVER-67529: Enable after ChunkManager can handle documents with $maxKey.
+ // next = pipeline->getNext();
+ // ASSERT(next);
+ // ASSERT_BSONOBJ_BINARY_EQ(BSON("_id" << 6 << "x" << MAXKEY << "$sortKey" << BSON_ARRAY(6)),
+ // next->toBson());
+
+ ASSERT_FALSE(pipeline->getNext());
+}
+
+TEST_F(ReshardingCollectionClonerTest, HashedShardKey) {
+ ShardKeyPattern sk{fromjson("{x: 'hashed'}")};
+ // Documents in a mock config.cache.chunks collection. Mocked collection boundaries:
+ // - [MinKey, hash(0)) : shard1
+ // - [hash(0), hash(0) + 1) : shard2
+ // - [hash(0) + 1, MaxKey] : shard3
+ std::deque<DocumentSource::GetNextResult> configData{
+ Doc{{"_id", Doc{{"x", V(MINKEY)}}},
+ {"max", Doc{{"x", getHashedElementValue(0)}}},
+ {"shard", "shard1"_sd}},
+ Doc{{"_id", Doc{{"x", getHashedElementValue(0)}}},
+ {"max", Doc{{"x", getHashedElementValue(0) + 1}}},
+ {"shard", "shard2"_sd}},
+ Doc{{"_id", Doc{{"x", getHashedElementValue(0) + 1}}},
+ {"max", Doc{{"x", V(MAXKEY)}}},
+ {"shard", "shard3"_sd}}};
+ getCatalogCacheMock()->setChunkManagerReturnValue(createChunkManager(sk, configData));
+ auto pipeline = makePipeline(std::move(sk),
+ ShardId("shard2"),
+ {Doc(fromjson("{_id: 1, x: {$minKey: 1}}")),
+ Doc(fromjson("{_id: 2, x: -1}")),
+ Doc(fromjson("{_id: 3, x: -0.123}")),
+ Doc(fromjson("{_id: 4, x: 0}")),
+ Doc(fromjson("{_id: 5, x: NumberLong(0)}")),
+ Doc(fromjson("{_id: 6, x: 0.123}")),
+ Doc(fromjson("{_id: 7, x: 1}")),
+ Doc(fromjson("{_id: 8, x: {$maxKey: 1}}"))},
+ std::move(configData));
+
+ auto next = pipeline->getNext();
+ ASSERT(next);
+ ASSERT_BSONOBJ_BINARY_EQ(BSON("_id" << 3 << "x" << -0.123 << "$sortKey" << BSON_ARRAY(3)),
+ next->toBson());
+
+ next = pipeline->getNext();
+ ASSERT(next);
+ ASSERT_BSONOBJ_BINARY_EQ(BSON("_id" << 4 << "x" << 0 << "$sortKey" << BSON_ARRAY(4)),
+ next->toBson());
+
+ next = pipeline->getNext();
+ ASSERT(next);
+ ASSERT_BSONOBJ_BINARY_EQ(BSON("_id" << 5 << "x" << 0LL << "$sortKey" << BSON_ARRAY(5)),
+ next->toBson());
+
+ next = pipeline->getNext();
+ ASSERT(next);
+ ASSERT_BSONOBJ_BINARY_EQ(BSON("_id" << 6 << "x" << 0.123 << "$sortKey" << BSON_ARRAY(6)),
+ next->toBson());
+
+ ASSERT_FALSE(pipeline->getNext());
+}
+
+TEST_F(ReshardingCollectionClonerTest, CompoundHashedShardKey) {
+ ShardKeyPattern sk{fromjson("{x: 'hashed', y: 1}")};
+ // Documents in a mock config.cache.chunks collection. Mocked collection boundaries:
+ // - [{x: MinKey, y: MinKey}, {x: hash(0), y: 0}) : shard1
+ // - [{x: hash(0), y: 0}, {x: hash(0), y: 1}) : shard2
+ // - [{x: hash(0), y: 1}, {x: MaxKey, y: MaxKey}] : shard3
+ std::deque<DocumentSource::GetNextResult> configData{
+ Doc{{"_id", Doc{{"x", V(MINKEY)}, {"y", V(MINKEY)}}},
+ {"max", Doc{{"x", getHashedElementValue(0)}, {"y", 0}}},
+ {"shard", "shard1"_sd}},
+ Doc{{"_id", Doc{{"x", getHashedElementValue(0)}, {"y", 0}}},
+ {"max", Doc{{"x", getHashedElementValue(0)}, {"y", 1}}},
+ {"shard", "shard2"_sd}},
+ Doc{{"_id", Doc{{"x", getHashedElementValue(0)}, {"y", 1}}},
+ {"max", Doc{{"x", V(MAXKEY)}, {"y", V(MAXKEY)}}},
+ {"shard", "shard3"_sd}}};
+ getCatalogCacheMock()->setChunkManagerReturnValue(createChunkManager(sk, configData));
+ auto pipeline = makePipeline(std::move(sk),
+ ShardId("shard2"),
+ {Doc(fromjson("{_id: 1, x: {$minKey: 1}}")),
+ Doc(fromjson("{_id: 2, x: -1}")),
+ Doc(fromjson("{_id: 3, x: -0.123, y: -1}")),
+ Doc(fromjson("{_id: 4, x: 0, y: 0}")),
+ Doc(fromjson("{_id: 5, x: NumberLong(0), y: 1}")),
+ Doc(fromjson("{_id: 6, x: 0.123}")),
+ Doc(fromjson("{_id: 7, x: 1}")),
+ Doc(fromjson("{_id: 8, x: {$maxKey: 1}}"))},
+ std::move(configData));
+
+ auto next = pipeline->getNext();
+ ASSERT(next);
+ ASSERT_BSONOBJ_BINARY_EQ(
+ BSON("_id" << 4 << "x" << 0 << "y" << 0 << "$sortKey" << BSON_ARRAY(4)), next->toBson());
+
+ ASSERT_FALSE(pipeline->getNext());
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/s/shard_server_test_fixture.cpp b/src/mongo/db/s/shard_server_test_fixture.cpp
index fe4de75a820..0567bd0fb26 100644
--- a/src/mongo/db/s/shard_server_test_fixture.cpp
+++ b/src/mongo/db/s/shard_server_test_fixture.cpp
@@ -94,4 +94,19 @@ std::unique_ptr<ShardingCatalogClient> ShardServerTestFixture::makeShardingCatal
return std::make_unique<ShardingCatalogClientImpl>();
}
+void ShardServerTestFixtureWithCatalogCacheMock::setUp() {
+ auto loader = std::make_unique<CatalogCacheLoaderMock>();
+ _cacheLoaderMock = loader.get();
+ setCatalogCacheLoader(std::move(loader));
+ ShardServerTestFixture::setUp();
+}
+
+std::unique_ptr<CatalogCache> ShardServerTestFixtureWithCatalogCacheMock::makeCatalogCache() {
+ return std::make_unique<CatalogCacheMock>(getServiceContext(), *_cacheLoaderMock);
+}
+
+CatalogCacheMock* ShardServerTestFixtureWithCatalogCacheMock::getCatalogCacheMock() {
+ return static_cast<CatalogCacheMock*>(catalogCache());
+}
+
} // namespace mongo
diff --git a/src/mongo/db/s/shard_server_test_fixture.h b/src/mongo/db/s/shard_server_test_fixture.h
index bd39a90fa4f..39c79947bfc 100644
--- a/src/mongo/db/s/shard_server_test_fixture.h
+++ b/src/mongo/db/s/shard_server_test_fixture.h
@@ -30,6 +30,8 @@
#pragma once
#include "mongo/db/s/sharding_mongod_test_fixture.h"
+#include "mongo/s/catalog_cache_loader_mock.h"
+#include "mongo/s/catalog_cache_mock.h"
namespace mongo {
@@ -69,4 +71,14 @@ protected:
std::unique_ptr<CatalogCacheLoader> _catalogCacheLoader;
};
+class ShardServerTestFixtureWithCatalogCacheMock : public ShardServerTestFixture {
+protected:
+ void setUp() override;
+ virtual std::unique_ptr<CatalogCache> makeCatalogCache() override;
+ CatalogCacheMock* getCatalogCacheMock();
+
+private:
+ CatalogCacheLoaderMock* _cacheLoaderMock;
+};
+
} // namespace mongo
diff --git a/src/mongo/db/s/sharding_mongod_test_fixture.cpp b/src/mongo/db/s/sharding_mongod_test_fixture.cpp
index c7b078c89e0..8e445ccce79 100644
--- a/src/mongo/db/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/db/s/sharding_mongod_test_fixture.cpp
@@ -199,6 +199,11 @@ std::unique_ptr<BalancerConfiguration> ShardingMongodTestFixture::makeBalancerCo
return std::make_unique<BalancerConfiguration>();
}
+std::unique_ptr<CatalogCache> ShardingMongodTestFixture::makeCatalogCache() {
+ return std::make_unique<CatalogCache>(getServiceContext(),
+ CatalogCacheLoader::get(getServiceContext()));
+}
+
Status ShardingMongodTestFixture::initializeGlobalShardingStateForMongodForTest(
const ConnectionString& configConnStr) {
invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer ||
@@ -212,12 +217,9 @@ Status ShardingMongodTestFixture::initializeGlobalShardingStateForMongodForTest(
executorPoolPtr->startup();
}
- auto catalogCache = std::make_unique<CatalogCache>(
- getServiceContext(), CatalogCacheLoader::get(getServiceContext()));
-
auto const grid = Grid::get(operationContext());
grid->init(makeShardingCatalogClient(),
- std::move(catalogCache),
+ makeCatalogCache(),
makeShardRegistry(configConnStr),
makeClusterCursorManager(),
makeBalancerConfiguration(),
diff --git a/src/mongo/db/s/sharding_mongod_test_fixture.h b/src/mongo/db/s/sharding_mongod_test_fixture.h
index 1c3a64e5a70..c08df77b714 100644
--- a/src/mongo/db/s/sharding_mongod_test_fixture.h
+++ b/src/mongo/db/s/sharding_mongod_test_fixture.h
@@ -58,6 +58,10 @@ protected:
void setUp() override;
void tearDown() override;
+ // Set a catalog cache to be used when initializing the Grid. Must be called before
+ // initializeGlobalShardingStateForMongodForTest() in order to take effect.
+ void setCatalogCache(std::unique_ptr<CatalogCache> cache);
+
/**
* Initializes sharding components according to the cluster role in
* serverGlobalParams.clusterRole and puts the components on the Grid, mimicking the
@@ -123,6 +127,12 @@ protected:
virtual std::unique_ptr<BalancerConfiguration> makeBalancerConfiguration();
/**
+ * Base class returns CatalogCache created from the CatalogCacheLoader set on the
+ * ServiceContext.
+ */
+ virtual std::unique_ptr<CatalogCache> makeCatalogCache();
+
+ /**
* Sets up the op observer listeners depending on cluster role.
*/
virtual void setupOpObservers();