author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-02-15 18:12:53 -0500
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-02-28 16:04:51 -0500
commit    6ba514b246e9163202a8a58b1fee10b329995904 (patch)
tree      350a4ed4405ff96209eeb701353adce62db079d9
parent    009752a819e0d53317ec517f94d9aabf3061445b (diff)
download  mongo-6ba514b246e9163202a8a58b1fee10b329995904.tar.gz
SERVER-28030 Remove writes from ChunkManager
Also moves the 'chunks' tests from dbtests into the ChunkManager unit-tests.

(cherry picked from commit 585ade2ae5f776effa2c63c2221a4639f8545bc2)
-rw-r--r-- src/mongo/db/s/SConscript | 1
-rw-r--r-- src/mongo/dbtests/SConscript | 1
-rw-r--r-- src/mongo/dbtests/chunktests.cpp | 503
-rw-r--r-- src/mongo/s/SConscript | 1
-rw-r--r-- src/mongo/s/catalog/sharding_catalog_client_impl.cpp | 160
-rw-r--r-- src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp | 40
-rw-r--r-- src/mongo/s/chunk_manager.cpp | 168
-rw-r--r-- src/mongo/s/chunk_manager.h | 31
-rw-r--r-- src/mongo/s/chunk_manager_test.cpp | 701
-rw-r--r-- src/mongo/s/config.cpp | 24
10 files changed, 674 insertions, 956 deletions
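
The shape of the change: chunk creation, the only write path left in ChunkManager, moves into a standalone createFirstChunks() helper inside sharding_catalog_client_impl.cpp, and ChunkManager itself becomes read-only, taking the collection epoch at construction time. A minimal sketch of the resulting construct-then-load pattern, assembled from the new constructor signature and call sites in the diff below (the surrounding OperationContext setup, here 'txn', is assumed):

    // ChunkManager no longer writes; it is built with a known epoch and then
    // only ever loads routing data from the config servers.
    const NamespaceString nss("TestDB", "TestColl");
    const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
    auto manager = stdx::make_unique<ChunkManager>(nss,
                                                   OID::gen(),  // collection epoch
                                                   shardKeyPattern,
                                                   nullptr,     // default collator
                                                   false /* unique */);
    manager->loadExistingRanges(txn, nullptr);  // read-only config.chunks fetch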
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index cafeabe4537..dcb179b846c 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -97,6 +97,7 @@ env.Library(
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/bson/util/bson_extract',
'$BUILD_DIR/mongo/db/common',
+ '$BUILD_DIR/mongo/s/catalog/dist_lock_manager',
'$BUILD_DIR/mongo/s/coreshard',
],
)
diff --git a/src/mongo/dbtests/SConscript b/src/mongo/dbtests/SConscript
index b82f16fdb15..9037b0a883d 100644
--- a/src/mongo/dbtests/SConscript
+++ b/src/mongo/dbtests/SConscript
@@ -53,7 +53,6 @@ dbtest = env.Program(
target="dbtest",
source=[
'basictests.cpp',
- 'chunktests.cpp',
'clienttests.cpp',
'commandtests.cpp',
'counttests.cpp',
diff --git a/src/mongo/dbtests/chunktests.cpp b/src/mongo/dbtests/chunktests.cpp
deleted file mode 100644
index 047d765e431..00000000000
--- a/src/mongo/dbtests/chunktests.cpp
+++ /dev/null
@@ -1,503 +0,0 @@
-/**
- * Copyright (C) 2012 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/json.h"
-#include "mongo/db/query/collation/collator_interface_mock.h"
-#include "mongo/db/query/query_test_service_context.h"
-#include "mongo/dbtests/dbtests.h"
-#include "mongo/s/chunk_manager.h"
-#include "mongo/stdx/memory.h"
-
-namespace mongo {
-
-using std::set;
-using std::string;
-using std::vector;
-
-class TestableChunkManager : public ChunkManager {
-public:
- TestableChunkManager(const string& ns,
- const ShardKeyPattern& keyPattern,
- std::unique_ptr<CollatorInterface> defaultCollator,
- bool unique)
- : ChunkManager(ns, keyPattern, std::move(defaultCollator), unique) {}
-
- void setSingleChunkForShards(const vector<BSONObj>& splitPoints) {
- vector<BSONObj> mySplitPoints(splitPoints);
- mySplitPoints.insert(mySplitPoints.begin(), _keyPattern.getKeyPattern().globalMin());
- mySplitPoints.push_back(_keyPattern.getKeyPattern().globalMax());
-
- for (unsigned i = 1; i < mySplitPoints.size(); ++i) {
- const string shardId = str::stream() << (i - 1);
- _shardIds.insert(shardId);
-
- ChunkType chunk;
- chunk.setNS(getns());
- chunk.setMin(mySplitPoints[i - 1]);
- chunk.setMax(mySplitPoints[i]);
- chunk.setShard(shardId);
- chunk.setVersion(ChunkVersion(0, 0, OID()));
-
- _chunkMap[mySplitPoints[i]] = std::make_shared<Chunk>(chunk);
- }
-
- _chunkRangeMap = _constructRanges(_chunkMap);
- }
-};
-
-namespace {
-
-class Base {
-public:
- Base() = default;
-
- virtual ~Base() = default;
-
- void run() {
- QueryTestServiceContext serviceContext;
- auto opCtx = serviceContext.makeOperationContext();
-
- ShardKeyPattern shardKeyPattern(shardKey());
- TestableChunkManager chunkManager(
- "TestDB.TestColl", shardKeyPattern, defaultCollator(), false);
- chunkManager.setSingleChunkForShards(splitPoints());
-
- set<ShardId> shardIds;
- chunkManager.getShardIdsForQuery(opCtx.get(), query(), queryCollation(), &shardIds);
-
- BSONArrayBuilder b;
- for (const ShardId& shardId : shardIds) {
- b << shardId;
- }
- ASSERT_BSONOBJ_EQ(expectedShardNames(), b.arr());
- }
-
-protected:
- virtual BSONObj shardKey() const {
- return BSON("a" << 1);
- }
-
- virtual std::unique_ptr<CollatorInterface> defaultCollator() const {
- return {nullptr};
- }
-
- virtual std::vector<BSONObj> splitPoints() const {
- return {};
- }
-
- virtual BSONObj query() const {
- return BSONObj();
- }
-
- virtual BSONObj queryCollation() const {
- return BSONObj();
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0");
- }
-};
-
-class EmptyQuerySingleShard : public Base {};
-
-class MultiShardBase : public Base {
- virtual std::vector<BSONObj> splitPoints() const {
- return {BSON("a"
- << "x"),
- BSON("a"
- << "y"),
- BSON("a"
- << "z")};
- }
-};
-
-class EmptyQueryMultiShard : public MultiShardBase {
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0"
- << "1"
- << "2"
- << "3");
- }
-};
-
-class UniversalRangeMultiShard : public EmptyQueryMultiShard {
- virtual BSONObj query() const {
- return BSON("b" << 1);
- }
-};
-
-class EqualityRangeSingleShard : public EmptyQuerySingleShard {
- virtual BSONObj query() const {
- return BSON("a"
- << "x");
- }
-};
-
-class EqualityRangeMultiShard : public MultiShardBase {
- virtual BSONObj query() const {
- return BSON("a"
- << "y");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("2");
- }
-};
-
-class SetRangeMultiShard : public MultiShardBase {
- virtual BSONObj query() const {
- return fromjson("{a:{$in:['u','y']}}");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0"
- << "2");
- }
-};
-
-class GTRangeMultiShard : public MultiShardBase {
- virtual BSONObj query() const {
- return BSON("a" << GT << "x");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("1"
- << "2"
- << "3");
- }
-};
-
-class GTERangeMultiShard : public MultiShardBase {
- virtual BSONObj query() const {
- return BSON("a" << GTE << "x");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("1"
- << "2"
- << "3");
- }
-};
-
-class LTRangeMultiShard : public MultiShardBase {
- virtual BSONObj query() const {
- return BSON("a" << LT << "y");
- }
-
- /**
- * It isn't actually necessary to return shard 2 because its lowest key is "y", which
- * is excluded from the query. SERVER-4791
- */
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0"
- << "1"
- << "2");
- }
-};
-
-class LTERangeMultiShard : public MultiShardBase {
- virtual BSONObj query() const {
- return BSON("a" << LTE << "y");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0"
- << "1"
- << "2");
- }
-};
-
-class OrEqualities : public MultiShardBase {
- virtual BSONObj query() const {
- return fromjson("{$or:[{a:'u'},{a:'y'}]}");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0"
- << "2");
- }
-};
-
-class OrEqualityInequality : public MultiShardBase {
- virtual BSONObj query() const {
- return fromjson("{$or:[{a:'u'},{a:{$gte:'y'}}]}");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0"
- << "2"
- << "3");
- }
-};
-
-class OrEqualityInequalityUnhelpful : public MultiShardBase {
- virtual BSONObj query() const {
- return fromjson("{$or:[{a:'u'},{a:{$gte:'zz'}},{}]}");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0"
- << "1"
- << "2"
- << "3");
- }
-};
-
-template <class BASE>
-class Unsatisfiable : public BASE {
- /**
- * SERVER-4914 For now the first shard is returned for unsatisfiable queries, as some
- * clients of getShardIdsForQuery() expect at least one shard.
- */
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0");
- }
-};
-
-class UnsatisfiableRangeSingleShard : public Unsatisfiable<Base> {
- virtual BSONObj query() const {
- return BSON("a" << GT << "x" << LT << "x");
- }
-};
-
-class UnsatisfiableRangeMultiShard : public Unsatisfiable<MultiShardBase> {
- virtual BSONObj query() const {
- return BSON("a" << GT << "x" << LT << "x");
- }
-};
-
-class EqualityThenUnsatisfiable : public Unsatisfiable<Base> {
- virtual BSONObj shardKey() const {
- return BSON("a" << 1 << "b" << 1);
- }
-
- virtual BSONObj query() const {
- return BSON("a" << 1 << "b" << GT << 4 << LT << 4);
- }
-};
-
-class InequalityThenUnsatisfiable : public Unsatisfiable<Base> {
- virtual BSONObj shardKey() const {
- return BSON("a" << 1 << "b" << 1);
- }
-
- virtual BSONObj query() const {
- return BSON("a" << GT << 1 << "b" << GT << 4 << LT << 4);
- }
-};
-
-class OrEqualityUnsatisfiableInequality : public MultiShardBase {
- virtual BSONObj query() const {
- return fromjson("{$or:[{a:'x'},{a:{$gt:'u',$lt:'u'}},{a:{$gte:'y'}}]}");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("1"
- << "2"
- << "3");
- }
-};
-
-class CompoundKeyBase : public Base {
- virtual BSONObj shardKey() const {
- return BSON("a" << 1 << "b" << 1);
- }
-
- virtual std::vector<BSONObj> splitPoints() const {
- return {BSON("a" << 5 << "b" << 10), BSON("a" << 5 << "b" << 20)};
- }
-};
-
-class InMultiShard : public CompoundKeyBase {
- virtual BSONObj query() const {
- return BSON("a" << BSON("$in" << BSON_ARRAY(0 << 5 << 10)) << "b"
- << BSON("$in" << BSON_ARRAY(0 << 5 << 25)));
- }
-
- // If we were to send this query to just the shards it actually needed to hit, it would
- // only hit shards 0 and 2. Because of the optimization from SERVER-4745, however, we'll
- // also hit shard 1.
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0"
- << "1"
- << "2");
- }
-};
-
-class CollationStringsMultiShard : public MultiShardBase {
- virtual BSONObj query() const {
- return BSON("a"
- << "y");
- }
-
- virtual BSONObj queryCollation() const {
- return BSON("locale"
- << "mock_reverse_string");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0"
- << "1"
- << "2"
- << "3");
- }
-};
-
-class DefaultCollationStringsMultiShard : public MultiShardBase {
- virtual BSONObj query() const {
- return BSON("a"
- << "y");
- }
-
- virtual std::unique_ptr<CollatorInterface> defaultCollator() const {
- auto collator = stdx::make_unique<CollatorInterfaceMock>(
- CollatorInterfaceMock::MockType::kReverseString);
- return {std::move(collator)};
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0"
- << "1"
- << "2"
- << "3");
- }
-};
-
-class SimpleCollationStringsMultiShard : public MultiShardBase {
- virtual BSONObj query() const {
- return BSON("a"
- << "y");
- }
-
- virtual std::unique_ptr<CollatorInterface> defaultCollator() const {
- auto collator = stdx::make_unique<CollatorInterfaceMock>(
- CollatorInterfaceMock::MockType::kReverseString);
- return {std::move(collator)};
- }
-
- virtual BSONObj queryCollation() const {
- return BSON("locale"
- << "simple");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("2");
- }
-};
-
-class CollationNumbersMultiShard : public MultiShardBase {
- virtual BSONObj query() const {
- return BSON("a" << 5);
- }
-
- virtual BSONObj queryCollation() const {
- return BSON("locale"
- << "mock_reverse_string");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0");
- }
-};
-
-class DefaultCollationNumbersMultiShard : public MultiShardBase {
- virtual BSONObj query() const {
- return BSON("a" << 5);
- }
-
- virtual std::unique_ptr<CollatorInterface> defaultCollator() const {
- auto collator = stdx::make_unique<CollatorInterfaceMock>(
- CollatorInterfaceMock::MockType::kReverseString);
- return {std::move(collator)};
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0");
- }
-};
-
-class SimpleCollationNumbersMultiShard : public MultiShardBase {
- virtual BSONObj query() const {
- return BSON("a" << 5);
- }
-
- virtual std::unique_ptr<CollatorInterface> defaultCollator() const {
- auto collator = stdx::make_unique<CollatorInterfaceMock>(
- CollatorInterfaceMock::MockType::kReverseString);
- return {std::move(collator)};
- }
-
- virtual BSONObj queryCollation() const {
- return BSON("locale"
- << "simple");
- }
-
- virtual BSONArray expectedShardNames() const {
- return BSON_ARRAY("0");
- }
-};
-
-class All : public Suite {
-public:
- All() : Suite("chunk") {}
-
- void setupTests() {
- add<EmptyQuerySingleShard>();
- add<EmptyQueryMultiShard>();
- add<UniversalRangeMultiShard>();
- add<EqualityRangeSingleShard>();
- add<EqualityRangeMultiShard>();
- add<SetRangeMultiShard>();
- add<GTRangeMultiShard>();
- add<GTERangeMultiShard>();
- add<LTRangeMultiShard>();
- add<LTERangeMultiShard>();
- add<OrEqualities>();
- add<OrEqualityInequality>();
- add<OrEqualityInequalityUnhelpful>();
- add<UnsatisfiableRangeSingleShard>();
- add<UnsatisfiableRangeMultiShard>();
- add<EqualityThenUnsatisfiable>();
- add<InequalityThenUnsatisfiable>();
- add<OrEqualityUnsatisfiableInequality>();
- add<InMultiShard>();
- add<CollationStringsMultiShard>();
- add<DefaultCollationStringsMultiShard>();
- add<SimpleCollationStringsMultiShard>();
- add<CollationNumbersMultiShard>();
- add<DefaultCollationNumbersMultiShard>();
- add<SimpleCollationNumbersMultiShard>();
- }
-};
-
-SuiteInstance<All> myAll;
-
-} // namespace
-} // namespace mongo
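
The Suite-based cases deleted above are recreated one-for-one as unit tests in src/mongo/s/chunk_manager_test.cpp later in this diff; for example, the EmptyQuerySingleShard case becomes (copied from the new file):

    TEST_F(ChunkManagerQueryTest, EmptyQuerySingleShard) {
        runQueryTest(BSON("a" << 1), nullptr, false, {}, BSONObj(), BSONObj(), {ShardId("0")});
    }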
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index ed7574032d7..225a49ccb02 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -239,6 +239,7 @@ env.Library(
'$BUILD_DIR/mongo/db/lasterror',
'$BUILD_DIR/mongo/db/repl/repl_coordinator_global',
'$BUILD_DIR/mongo/executor/task_executor_pool',
+ '$BUILD_DIR/mongo/s/catalog/sharding_catalog_client',
'$BUILD_DIR/mongo/s/query/cluster_cursor_manager',
'client/sharding_client',
'common',
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 01bb420f49a..2c75c95b488 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -55,6 +55,7 @@
#include "mongo/executor/task_executor.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
+#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/config_server_version.h"
#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/type_changelog.h"
@@ -64,7 +65,6 @@
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
-#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
@@ -117,6 +117,108 @@ void toBatchError(const Status& status, BatchedCommandResponse* response) {
response->setOk(false);
}
+/**
+ * Creates and writes to the config server the first chunks for a newly sharded collection. Returns
+ * the version generated for the collection.
+ */
+StatusWith<ChunkVersion> createFirstChunks(OperationContext* txn,
+ const NamespaceString& nss,
+ const ShardKeyPattern& shardKeyPattern,
+ const ShardId& primaryShardId,
+ const std::vector<BSONObj>& initPoints,
+ const std::set<ShardId>& initShardIds) {
+
+ const KeyPattern keyPattern = shardKeyPattern.getKeyPattern();
+
+ vector<BSONObj> splitPoints;
+ vector<ShardId> shardIds;
+
+ if (initPoints.empty()) {
+ // If no split points were specified use the shard's data distribution to determine them
+ auto primaryShard =
+ uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, primaryShardId));
+
+ auto result = uassertStatusOK(primaryShard->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
+ nss.db().toString(),
+ BSON("count" << nss.coll()),
+ Shard::RetryPolicy::kIdempotent));
+
+ long long numObjects = 0;
+ uassertStatusOK(result.commandStatus);
+ uassertStatusOK(bsonExtractIntegerField(result.response, "n", &numObjects));
+
+ if (numObjects > 0) {
+ splitPoints = uassertStatusOK(shardutil::selectChunkSplitPoints(
+ txn,
+ primaryShardId,
+ nss,
+ shardKeyPattern,
+ ChunkRange(keyPattern.globalMin(), keyPattern.globalMax()),
+ Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
+ 0));
+ }
+
+ // Since docs already exist for the collection, must use primary shard
+ shardIds.push_back(primaryShardId);
+ } else {
+ // Make sure points are unique and ordered
+ auto orderedPts = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
+
+ for (const auto& initPoint : initPoints) {
+ orderedPts.insert(initPoint);
+ }
+
+ for (const auto& initPoint : orderedPts) {
+ splitPoints.push_back(initPoint);
+ }
+
+ if (initShardIds.empty()) {
+ // If not specified, only use the primary shard (note that it's not safe for mongos to
+ // put initial chunks on other shards without the primary mongod knowing)
+ shardIds.push_back(primaryShardId);
+ } else {
+ std::copy(initShardIds.begin(), initShardIds.end(), std::back_inserter(shardIds));
+ }
+ }
+
+ // This is the first chunk; start the versioning from scratch
+ const OID epoch = OID::gen();
+ ChunkVersion version(1, 0, epoch);
+
+ log() << "going to create " << splitPoints.size() + 1 << " chunk(s) for: " << nss
+ << " using new epoch " << version.epoch();
+
+ for (unsigned i = 0; i <= splitPoints.size(); i++) {
+ const BSONObj min = (i == 0) ? keyPattern.globalMin() : splitPoints[i - 1];
+ const BSONObj max = (i < splitPoints.size()) ? splitPoints[i] : keyPattern.globalMax();
+
+ // The correct version must be returned as part of this call, so only increment it for the
+ // versions which actually get written
+ if (i > 0) {
+ version.incMinor();
+ }
+
+ ChunkType chunk;
+ chunk.setNS(nss.ns());
+ chunk.setMin(min);
+ chunk.setMax(max);
+ chunk.setShard(shardIds[i % shardIds.size()]);
+ chunk.setVersion(version);
+
+ Status status = Grid::get(txn)->catalogClient(txn)->insertConfigDocument(
+ txn, ChunkType::ConfigNS, chunk.toBSON(), ShardingCatalogClient::kMajorityWriteConcern);
+ if (!status.isOK()) {
+ return {status.code(),
+ str::stream() << "Creating first chunks failed due to "
+ << redact(status.reason())};
+ }
+ }
+
+ return version;
+}
+
} // namespace
ShardingCatalogClientImpl::ShardingCatalogClientImpl(
@@ -421,29 +523,28 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
return getDBStatus.getStatus();
}
+ auto const shardRegistry = Grid::get(txn)->shardRegistry();
+
ShardId dbPrimaryShardId = getDBStatus.getValue().value.getPrimary();
- const auto primaryShardStatus = grid.shardRegistry()->getShard(txn, dbPrimaryShardId);
+ const auto primaryShardStatus = shardRegistry->getShard(txn, dbPrimaryShardId);
if (!primaryShardStatus.isOK()) {
return primaryShardStatus.getStatus();
}
{
- // In 3.0 and prior we include this extra safety check that the collection is not getting
- // sharded concurrently by two different mongos instances. It is not 100%-proof, but it
- // reduces the chance that two invocations of shard collection will step on each other's
- // toes. Now we take the distributed lock so going forward this check won't be necessary
- // but we leave it around for compatibility with other mongoses from 3.0.
- // TODO(spencer): Remove this after 3.2 ships.
+ // This is an extra safety check that there aren't any partially written chunks from a
+ // previous failed invocation of 'shardCollection'
auto countStatus = _runCountCommandOnConfig(
txn, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::ns(ns)));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
+
if (countStatus.getValue() > 0) {
- return Status(ErrorCodes::AlreadyInitialized,
- str::stream() << "collection " << ns << " already sharded with "
- << countStatus.getValue()
- << " chunks.");
+ return {ErrorCodes::AlreadyInitialized,
+ str::stream() << "collection " << ns << " already sharded with "
+ << countStatus.getValue()
+ << " chunks."};
}
}
@@ -470,6 +571,8 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
ShardingCatalogClientImpl::kMajorityWriteConcern);
}
+ const NamespaceString nss(ns);
+
// Construct the collection default collator.
std::unique_ptr<CollatorInterface> defaultCollator;
if (!defaultCollation.isEmpty()) {
@@ -481,28 +584,25 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
defaultCollator = std::move(statusWithCollator.getValue());
}
- shared_ptr<ChunkManager> manager(
- new ChunkManager(ns, fieldsAndOrder, std::move(defaultCollator), unique));
- Status createFirstChunksStatus =
- manager->createFirstChunks(txn, dbPrimaryShardId, &initPoints, &initShardIds);
+ auto createFirstChunksStatus =
+ createFirstChunks(txn, nss, fieldsAndOrder, dbPrimaryShardId, initPoints, initShardIds);
if (!createFirstChunksStatus.isOK()) {
- return createFirstChunksStatus;
+ return createFirstChunksStatus.getStatus();
}
- manager->loadExistingRanges(txn, nullptr);
+
+ const auto& collVersion = createFirstChunksStatus.getValue();
{
CollectionType coll;
- coll.setNs(NamespaceString(manager->getns()));
- coll.setEpoch(manager->getVersion().epoch());
+ coll.setNs(nss);
+ coll.setEpoch(collVersion.epoch());
// TODO(schwerin): The following isn't really a date, but is stored as one in-memory and in
// config.collections, as a historical oddity.
- coll.setUpdatedAt(Date_t::fromMillisSinceEpoch(manager->getVersion().toLong()));
- coll.setKeyPattern(manager->getShardKeyPattern().toBSON());
- coll.setDefaultCollation(manager->getDefaultCollator()
- ? manager->getDefaultCollator()->getSpec().toBSON()
- : BSONObj());
- coll.setUnique(manager->isUnique());
+ coll.setUpdatedAt(Date_t::fromMillisSinceEpoch(collVersion.toLong()));
+ coll.setKeyPattern(fieldsAndOrder.toBSON());
+ coll.setDefaultCollation(defaultCollator ? defaultCollator->getSpec().toBSON() : BSONObj());
+ coll.setUnique(unique);
Status updateCollStatus = updateCollection(txn, ns, coll);
if (!updateCollStatus.isOK()) {
@@ -514,14 +614,14 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
// TODO: Think the real fix here is for mongos to just
// assume that all collections are sharded, when we get there
SetShardVersionRequest ssv = SetShardVersionRequest::makeForVersioningNoPersist(
- grid.shardRegistry()->getConfigServerConnectionString(),
+ shardRegistry->getConfigServerConnectionString(),
dbPrimaryShardId,
primaryShardStatus.getValue()->getConnString(),
NamespaceString(ns),
- manager->getVersion(),
+ collVersion,
true);
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, dbPrimaryShardId);
+ auto shardStatus = shardRegistry->getShard(txn, dbPrimaryShardId);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
@@ -543,7 +643,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
logChange(txn,
"shardCollection.end",
ns,
- BSON("version" << manager->getVersion().toString()),
+ BSON("version" << collVersion.toString()),
ShardingCatalogClientImpl::kMajorityWriteConcern);
return Status::OK();
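
For reference, the version numbering implemented by createFirstChunks() above: the first chunk is written with version 1|0 under a freshly generated epoch, and every subsequent chunk bumps only the minor version, so for N split points (N + 1 chunks) the returned collection version is 1|N. A condensed sketch of just that arithmetic, with the chunk construction and config write elided:

    const OID epoch = OID::gen();
    ChunkVersion version(1, 0, epoch);  // first chunk: major 1, minor 0
    for (unsigned i = 0; i <= splitPoints.size(); i++) {
        if (i > 0) {
            version.incMinor();  // count only the versions that actually get written
        }
        // ... build chunk i with 'version' and insert it into config.chunks ...
    }
    return version;  // version of the last written chunk == the collection version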
diff --git a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
index 920714e4ad3..74006f5de6a 100644
--- a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
@@ -154,37 +154,6 @@ public:
return actualVersion;
}
- void expectReloadChunks(const std::string& ns, const vector<ChunkType>& chunks) {
- onFindCommand([&](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(configHost, request.target);
- ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata,
- rpc::TrackingMetadata::removeTrackingData(request.metadata));
-
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), ChunkType::ConfigNS);
-
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- BSONObj expectedQuery =
- BSON(ChunkType::ns(ns) << ChunkType::DEPRECATED_lastmod << GTE << Timestamp());
- BSONObj expectedSort = BSON(ChunkType::DEPRECATED_lastmod() << 1);
-
- ASSERT_EQ(ChunkType::ConfigNS, query->ns());
- ASSERT_BSONOBJ_EQ(expectedQuery, query->getFilter());
- ASSERT_BSONOBJ_EQ(expectedSort, query->getSort());
- ASSERT_FALSE(query->getLimit().is_initialized());
-
- checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
-
- vector<BSONObj> chunksToReturn;
-
- std::transform(chunks.begin(),
- chunks.end(),
- std::back_inserter(chunksToReturn),
- [](const ChunkType& chunk) { return chunk.toBSON(); });
- return chunksToReturn;
- });
- }
-
void expectUpdateCollection(const CollectionType& expectedCollection) {
onCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
@@ -387,9 +356,6 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
// written, to avoid problems relating to non-matching epochs down the road.
expectedChunk.setVersion(actualVersion);
- // Handle the query to load the newly created chunk
- expectReloadChunks(ns, {expectedChunk});
-
CollectionType expectedCollection;
expectedCollection.setNs(NamespaceString(ns));
expectedCollection.setEpoch(expectedChunk.getVersion().epoch());
@@ -580,9 +546,6 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
expectedChunk.setVersion(actualVersion);
}
- // Handle the query to load the newly created chunk
- expectReloadChunks(ns, expectedChunks);
-
CollectionType expectedCollection;
expectedCollection.setNs(NamespaceString(ns));
expectedCollection.setEpoch(expectedChunks[4].getVersion().epoch());
@@ -776,9 +739,6 @@ TEST_F(ShardCollectionTest, withInitialData) {
expectedChunk.setVersion(actualVersion);
}
- // Handle the query to load the newly created chunk
- expectReloadChunks(ns, expectedChunks);
-
CollectionType expectedCollection;
expectedCollection.setNs(NamespaceString(ns));
expectedCollection.setEpoch(expectedChunks[4].getVersion().epoch());
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 8c8a03bccbc..afddc3158d5 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -35,6 +35,7 @@
#include <boost/next_prior.hpp>
#include <map>
#include <set>
+#include <vector>
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/bson/util/bson_extract.h"
@@ -42,16 +43,13 @@
#include "mongo/client/remote_command_targeter.h"
#include "mongo/db/matcher/extensions_callback_noop.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/operation_context.h"
#include "mongo/db/query/collation/collation_index_key.h"
-#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/query/index_bounds_builder.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
-#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/chunk_diff.h"
#include "mongo/s/client/shard_registry.h"
@@ -69,7 +67,6 @@ using std::set;
using std::shared_ptr;
using std::string;
using std::unique_ptr;
-using std::vector;
namespace {
@@ -166,41 +163,20 @@ bool isChunkMapValid(const ChunkMap& chunkMap) {
} // namespace
-ChunkManager::ChunkManager(const string& ns,
- const ShardKeyPattern& pattern,
+ChunkManager::ChunkManager(const NamespaceString& nss,
+ const OID& epoch,
+ const ShardKeyPattern& shardKeyPattern,
std::unique_ptr<CollatorInterface> defaultCollator,
bool unique)
: _sequenceNumber(nextCMSequenceNumber.addAndFetch(1)),
- _ns(ns),
- _keyPattern(pattern.getKeyPattern()),
+ _ns(nss.ns()),
+ _keyPattern(shardKeyPattern.getKeyPattern()),
_defaultCollator(std::move(defaultCollator)),
_unique(unique),
_chunkMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<std::shared_ptr<Chunk>>()),
_chunkRangeMap(
- SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<ShardAndChunkRange>()) {}
-
-ChunkManager::ChunkManager(OperationContext* txn, const CollectionType& coll)
- : _sequenceNumber(nextCMSequenceNumber.addAndFetch(1)),
- _ns(coll.getNs().ns()),
- _keyPattern(coll.getKeyPattern()),
- _unique(coll.getUnique()),
-
- _chunkMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<std::shared_ptr<Chunk>>()),
- _chunkRangeMap(
- SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<ShardAndChunkRange>()) {
- // coll does not have correct version. Use same initial version as _load and createFirstChunks.
- _version = ChunkVersion(0, 0, coll.getEpoch());
-
- if (!coll.getDefaultCollation().isEmpty()) {
- auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
- ->makeFromBSON(coll.getDefaultCollation());
-
- // The collation was validated upon collection creation.
- invariantOK(statusWithCollator.getStatus());
-
- _defaultCollator = std::move(statusWithCollator.getValue());
- }
-}
+ SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<ShardAndChunkRange>()),
+ _version(0, 0, epoch) {}
ChunkManager::~ChunkManager() = default;
@@ -286,14 +262,14 @@ bool ChunkManager::_load(OperationContext* txn,
repl::OpTime opTime;
std::vector<ChunkType> chunks;
- uassertStatusOK(
- grid.catalogClient(txn)->getChunks(txn,
- diffQuery.query,
- diffQuery.sort,
- boost::none,
- &chunks,
- &opTime,
- repl::ReadConcernLevel::kMajorityReadConcern));
+ uassertStatusOK(Grid::get(txn)->catalogClient(txn)->getChunks(
+ txn,
+ diffQuery.query,
+ diffQuery.sort,
+ boost::none,
+ &chunks,
+ &opTime,
+ repl::ReadConcernLevel::kMajorityReadConcern));
invariant(opTime >= _configOpTime);
_configOpTime = opTime;
@@ -305,7 +281,7 @@ bool ChunkManager::_load(OperationContext* txn,
// Add all existing shards we find to the shards set
for (ShardVersionMap::iterator it = shardVersions->begin(); it != shardVersions->end();) {
- auto shardStatus = grid.shardRegistry()->getShard(txn, it->first);
+ auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, it->first);
if (shardStatus.isOK()) {
shardIds.insert(it->first);
++it;
@@ -355,112 +331,6 @@ bool ChunkManager::_load(OperationContext* txn,
}
}
-void ChunkManager::calcInitSplitsAndShards(OperationContext* txn,
- const ShardId& primaryShardId,
- const vector<BSONObj>* initPoints,
- const set<ShardId>* initShardIds,
- vector<BSONObj>* splitPoints,
- vector<ShardId>* shardIds) const {
- invariant(_chunkMap.empty());
-
- if (!initPoints || initPoints->empty()) {
- // discover split points
- auto primaryShard = uassertStatusOK(grid.shardRegistry()->getShard(txn, primaryShardId));
- const NamespaceString nss{getns()};
-
- auto result = uassertStatusOK(primaryShard->runCommandWithFixedRetryAttempts(
- txn,
- ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
- nss.db().toString(),
- BSON("count" << nss.coll()),
- Shard::RetryPolicy::kIdempotent));
-
- long long numObjects = 0;
- uassertStatusOK(result.commandStatus);
- uassertStatusOK(bsonExtractIntegerField(result.response, "n", &numObjects));
-
- if (numObjects > 0) {
- *splitPoints = uassertStatusOK(shardutil::selectChunkSplitPoints(
- txn,
- primaryShardId,
- NamespaceString(_ns),
- _keyPattern,
- ChunkRange(_keyPattern.getKeyPattern().globalMin(),
- _keyPattern.getKeyPattern().globalMax()),
- Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
- 0));
- }
-
- // since docs already exists, must use primary shard
- shardIds->push_back(primaryShardId);
- } else {
- // make sure points are unique and ordered
- auto orderedPts = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
- for (unsigned i = 0; i < initPoints->size(); ++i) {
- BSONObj pt = (*initPoints)[i];
- orderedPts.insert(pt);
- }
- for (auto it = orderedPts.begin(); it != orderedPts.end(); ++it) {
- splitPoints->push_back(*it);
- }
-
- if (!initShardIds || !initShardIds->size()) {
- // If not specified, only use the primary shard (note that it's not safe for mongos
- // to put initial chunks on other shards without the primary mongod knowing).
- shardIds->push_back(primaryShardId);
- } else {
- std::copy(initShardIds->begin(), initShardIds->end(), std::back_inserter(*shardIds));
- }
- }
-}
-
-Status ChunkManager::createFirstChunks(OperationContext* txn,
- const ShardId& primaryShardId,
- const vector<BSONObj>* initPoints,
- const set<ShardId>* initShardIds) {
- // TODO distlock?
- // TODO: Race condition if we shard the collection and insert data while we split across
- // the non-primary shard.
-
- vector<BSONObj> splitPoints;
- vector<ShardId> shardIds;
- calcInitSplitsAndShards(txn, primaryShardId, initPoints, initShardIds, &splitPoints, &shardIds);
-
- // this is the first chunk; start the versioning from scratch
- ChunkVersion version(1, 0, OID::gen());
-
- log() << "going to create " << splitPoints.size() + 1 << " chunk(s) for: " << _ns
- << " using new epoch " << version.epoch();
-
- for (unsigned i = 0; i <= splitPoints.size(); i++) {
- BSONObj min = i == 0 ? _keyPattern.getKeyPattern().globalMin() : splitPoints[i - 1];
- BSONObj max =
- i < splitPoints.size() ? splitPoints[i] : _keyPattern.getKeyPattern().globalMax();
-
- ChunkType chunk;
- chunk.setNS(_ns);
- chunk.setMin(min);
- chunk.setMax(max);
- chunk.setShard(shardIds[i % shardIds.size()]);
- chunk.setVersion(version);
-
- Status status = grid.catalogClient(txn)->insertConfigDocument(
- txn, ChunkType::ConfigNS, chunk.toBSON(), ShardingCatalogClient::kMajorityWriteConcern);
- if (!status.isOK()) {
- const string errMsg = str::stream() << "Creating first chunks failed: "
- << redact(status.reason());
- error() << errMsg;
- return Status(status.code(), errMsg);
- }
-
- version.incMinor();
- }
-
- _version = ChunkVersion(0, 0, version.epoch());
-
- return Status::OK();
-}
-
StatusWith<shared_ptr<Chunk>> ChunkManager::findIntersectingChunk(OperationContext* txn,
const BSONObj& shardKey,
const BSONObj& collation) const {
@@ -654,7 +524,7 @@ IndexBounds ChunkManager::getIndexBoundsForQuery(const BSONObj& key,
IndexBounds bounds;
- for (vector<QuerySolution*>::const_iterator it = solutions.begin();
+ for (std::vector<QuerySolution*>::const_iterator it = solutions.begin();
bounds.size() == 0 && it != solutions.end();
it++) {
// Try next solution if we failed to generate index bounds, i.e. bounds.size() == 0
@@ -693,7 +563,7 @@ IndexBounds ChunkManager::collapseQuerySolution(const QuerySolutionNode* node) {
}
IndexBounds bounds;
- for (vector<QuerySolutionNode*>::const_iterator it = node->children.begin();
+ for (std::vector<QuerySolutionNode*>::const_iterator it = node->children.begin();
it != node->children.end();
it++) {
// The first branch under OR
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index 4cd6700cacf..f2d75fdb0b0 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -31,7 +31,6 @@
#include <map>
#include <set>
#include <string>
-#include <vector>
#include "mongo/base/disallow_copying.h"
#include "mongo/db/query/collation/collator_interface.h"
@@ -46,8 +45,8 @@
namespace mongo {
class CanonicalQuery;
-class CollectionType;
struct QuerySolutionNode;
+class NamespaceString;
class OperationContext;
// The key for the map is max for each Chunk or ChunkRange
@@ -57,11 +56,9 @@ class ChunkManager {
MONGO_DISALLOW_COPYING(ChunkManager);
public:
- ChunkManager(OperationContext* txn, const CollectionType& coll);
-
- // Creates an empty chunk manager for the namespace
- ChunkManager(const std::string& ns,
- const ShardKeyPattern& pattern,
+ ChunkManager(const NamespaceString& nss,
+ const OID& epoch,
+ const ShardKeyPattern& shardKeyPattern,
std::unique_ptr<CollatorInterface> defaultCollator,
bool unique);
@@ -91,29 +88,9 @@ public:
return _sequenceNumber;
}
- //
- // After constructor is invoked, we need to call loadExistingRanges. If this is a new
- // sharded collection, we can call createFirstChunks first.
- //
-
- // Creates new chunks based on info in chunk manager
- Status createFirstChunks(OperationContext* txn,
- const ShardId& primaryShardId,
- const std::vector<BSONObj>* initPoints,
- const std::set<ShardId>* initShardIds);
-
// Loads existing ranges based on info in chunk manager
void loadExistingRanges(OperationContext* txn, const ChunkManager* oldManager);
-
- // Helpers for load
- void calcInitSplitsAndShards(OperationContext* txn,
- const ShardId& primaryShardId,
- const std::vector<BSONObj>* initPoints,
- const std::set<ShardId>* initShardIds,
- std::vector<BSONObj>* splitPoints,
- std::vector<ShardId>* shardIds) const;
-
//
// Methods to use once loaded / created
//
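
With createFirstChunks() and calcInitSplitsAndShards() gone from this header, shardCollection() no longer needs a ChunkManager at all; it consumes the returned ChunkVersion directly, as in this condensed excerpt of the new flow in sharding_catalog_client_impl.cpp above (error handling shortened):

    auto createFirstChunksStatus =
        createFirstChunks(txn, nss, fieldsAndOrder, dbPrimaryShardId, initPoints, initShardIds);
    if (!createFirstChunksStatus.isOK()) {
        return createFirstChunksStatus.getStatus();
    }
    const auto& collVersion = createFirstChunksStatus.getValue();
    // collVersion now feeds the config.collections update and the SSV request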
diff --git a/src/mongo/s/chunk_manager_test.cpp b/src/mongo/s/chunk_manager_test.cpp
index 4f3e3e87524..b0599fcf690 100644
--- a/src/mongo/s/chunk_manager_test.cpp
+++ b/src/mongo/s/chunk_manager_test.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2009 10gen Inc.
+ * Copyright (C) 2017 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -17,260 +17,553 @@
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
*/
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault
#include "mongo/platform/basic.h"
+#include <set>
+
#include "mongo/client/remote_command_targeter_mock.h"
-#include "mongo/db/operation_context_noop.h"
+#include "mongo/db/client.h"
+#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/s/catalog/sharding_catalog_test_fixture.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/chunk_manager.h"
-#include "mongo/s/write_ops/batched_command_request.h"
-#include "mongo/s/write_ops/batched_command_response.h"
+#include "mongo/stdx/memory.h"
+#include "mongo/util/scopeguard.h"
namespace mongo {
-
-using std::unique_ptr;
-using std::set;
-using std::string;
-using std::vector;
+namespace {
using executor::RemoteCommandResponse;
using executor::RemoteCommandRequest;
-namespace {
-
-static int rand(int max = -1) {
- static unsigned seed = 1337;
-
-#if !defined(_WIN32)
- int r = rand_r(&seed);
-#else
- int r = ::rand(); // seed not used in this case
-#endif
+const NamespaceString kNss("TestDB", "TestColl");
- // Modding is bad, but don't really care in this case
- return max > 0 ? r % max : r;
-}
-
-class ChunkManagerFixture : public ShardingCatalogTestFixture {
-public:
+class ChunkManagerTestFixture : public ShardingCatalogTestFixture {
+protected:
void setUp() override {
ShardingCatalogTestFixture::setUp();
setRemote(HostAndPort("FakeRemoteClient:34567"));
- configTargeter()->setFindHostReturnValue(configHost);
+ configTargeter()->setFindHostReturnValue(HostAndPort{CONFIG_HOST_PORT});
}
-protected:
- const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
- static const ShardId _shardId;
- static const string _collName;
- static const string _dbName;
-
- static const int numSplitPoints = 100;
-
- void genUniqueRandomSplitKeys(const string& keyName, vector<BSONObj>* splitKeys) {
- stdx::unordered_set<int> uniquePoints;
- while (static_cast<int>(uniquePoints.size()) < numSplitPoints) {
- uniquePoints.insert(rand(numSplitPoints * 10));
+ /**
+ * Returns a chunk manager with chunks at the specified split points. Each individual chunk is
+ * placed on a separate shard, with ids ranging from "0" up to the number of chunks minus one.
+ */
+ std::unique_ptr<ChunkManager> makeChunkManager(
+ const ShardKeyPattern& shardKeyPattern,
+ std::unique_ptr<CollatorInterface> defaultCollator,
+ bool unique,
+ const std::vector<BSONObj>& splitPoints) {
+ ChunkVersion version(1, 0, OID::gen());
+
+ std::vector<BSONObj> shards;
+ std::vector<BSONObj> initialChunks;
+
+ auto splitPointsIncludingEnds(splitPoints);
+ splitPointsIncludingEnds.insert(splitPointsIncludingEnds.begin(),
+ shardKeyPattern.getKeyPattern().globalMin());
+ splitPointsIncludingEnds.push_back(shardKeyPattern.getKeyPattern().globalMax());
+
+ for (size_t i = 1; i < splitPointsIncludingEnds.size(); ++i) {
+ ShardType shard;
+ shard.setName(str::stream() << (i - 1));
+ shard.setHost(str::stream() << "Host" << (i - 1) << ":12345");
+
+ shards.push_back(shard.toBSON());
+
+ ChunkType chunk;
+ chunk.setNS(kNss.ns());
+ chunk.setMin(shardKeyPattern.getKeyPattern().extendRangeBound(
+ splitPointsIncludingEnds[i - 1], false));
+ chunk.setMax(shardKeyPattern.getKeyPattern().extendRangeBound(
+ splitPointsIncludingEnds[i], false));
+ chunk.setShard(shard.getName());
+ chunk.setVersion(version);
+
+ initialChunks.push_back(chunk.toBSON());
+
+ version.incMajor();
}
- for (auto it = uniquePoints.begin(); it != uniquePoints.end(); ++it) {
- splitKeys->push_back(BSON(keyName << *it));
- }
- }
- void expectInsertOnConfigSaveChunkAndReturnOk(std::vector<BSONObj>& chunks) {
- onCommandWithMetadata([&](const RemoteCommandRequest& request) mutable {
- ASSERT_EQ(request.target, HostAndPort(CONFIG_HOST_PORT));
- ASSERT_EQ(request.dbname, "config");
+ // Load the initial manager
+ auto manager = stdx::make_unique<ChunkManager>(
+ kNss, version.epoch(), shardKeyPattern, std::move(defaultCollator), unique);
+
+ auto future = launchAsync([&manager] {
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThread("Test");
+ auto opCtx = cc().makeOperationContext();
+ manager->loadExistingRanges(opCtx.get(), nullptr);
+ });
- // Get "inserted" chunk doc from RemoteCommandRequest.
- BatchedCommandRequest batchedCommandRequest(BatchedCommandRequest::BatchType_Insert);
- string errmsg;
- batchedCommandRequest.parseBSON(_dbName, request.cmdObj, &errmsg);
- vector<BSONObj> docs = batchedCommandRequest.getInsertRequest()->getDocuments();
- BSONObj chunk = docs.front();
+ expectFindOnConfigSendBSONObjVector(initialChunks);
+ expectFindOnConfigSendBSONObjVector(shards);
- // Save chunk (mimic "insertion").
- chunks.push_back(chunk);
+ future.timed_get(kFutureTimeout);
- return RemoteCommandResponse(BSON("ok" << 1), BSONObj(), Milliseconds(1));
- });
+ return manager;
}
+};
- void expectInsertOnConfigCheckMetadataAndReturnOk(set<int>& minorVersions, OID& epoch) {
- onCommandWithMetadata([&](const RemoteCommandRequest& request) mutable {
- ASSERT_EQ(request.target, HostAndPort(CONFIG_HOST_PORT));
- ASSERT_EQ(request.dbname, "config");
+using ChunkManagerLoadTest = ChunkManagerTestFixture;
- // Get "inserted" chunk doc from RemoteCommandRequest.
- BatchedCommandRequest batchedCommandRequest(BatchedCommandRequest::BatchType_Insert);
- string errmsg;
- batchedCommandRequest.parseBSON(_dbName, request.cmdObj, &errmsg);
- vector<BSONObj> docs = batchedCommandRequest.getInsertRequest()->getDocuments();
- BSONObj chunk = docs.front();
+TEST_F(ChunkManagerLoadTest, IncrementalLoadAfterSplit) {
+ const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkVersion version = ChunkVersion::fromBSON(chunk, ChunkType::DEPRECATED_lastmod());
+ auto initialManager(makeChunkManager(shardKeyPattern, nullptr, true, {}));
- // Check chunk's major version.
- ASSERT(version.majorVersion() == 1);
+ ChunkVersion version = initialManager->getVersion();
- // Check chunk's minor version is unique.
- ASSERT(minorVersions.find(version.minorVersion()) == minorVersions.end());
- minorVersions.insert(version.minorVersion());
+ CollectionType collType;
+ collType.setNs(kNss);
+ collType.setEpoch(version.epoch());
+ collType.setUpdatedAt(jsTime());
+ collType.setKeyPattern(shardKeyPattern.toBSON());
+ collType.setUnique(false);
- // Check chunk's epoch is consistent.
- ASSERT(version.epoch().isSet());
- if (!epoch.isSet()) {
- epoch = version.epoch();
- }
- ASSERT(version.epoch() == epoch);
+ ChunkManager manager(kNss, version.epoch(), shardKeyPattern, nullptr, true);
- // Check chunk's shard id.
- ASSERT(chunk[ChunkType::shard()].String() == _shardId.toString());
+ auto future =
+ launchAsync([&] { manager.loadExistingRanges(operationContext(), initialManager.get()); });
- return RemoteCommandResponse(BSON("ok" << 1), BSONObj(), Milliseconds(1));
- });
- }
-};
+ // Return a set of chunks that represents a split
+ expectFindOnConfigSendBSONObjVector([&]() {
+ version.incMajor();
+
+ ChunkType chunk1;
+ chunk1.setNS(kNss.ns());
+ chunk1.setMin(shardKeyPattern.getKeyPattern().globalMin());
+ chunk1.setMax(BSON("_id" << 0));
+ chunk1.setShard({"0"});
+ chunk1.setVersion(version);
-const ShardId ChunkManagerFixture::_shardId{"shard0000"};
-const string ChunkManagerFixture::_collName{"foo.bar"};
-const string ChunkManagerFixture::_dbName{"foo"};
+ version.incMinor();
-// Rename the fixture so that our tests have a useful name in the executable
-typedef ChunkManagerFixture ChunkManagerTests;
+ ChunkType chunk2;
+ chunk2.setNS(kNss.ns());
+ chunk2.setMin(BSON("_id" << 0));
+ chunk2.setMax(shardKeyPattern.getKeyPattern().globalMax());
+ chunk2.setShard({"0"});
+ chunk2.setVersion(version);
+
+ return std::vector<BSONObj>{chunk1.toBSON(), chunk2.toBSON()};
+ }());
+
+ future.timed_get(kFutureTimeout);
+}
/**
- * Tests loading chunks into a ChunkManager with or without an old ChunkManager.
+ * Fixture to be used as a shortcut for tests which exercise the getShardIdsForQuery routing logic
*/
-TEST_F(ChunkManagerTests, Basic) {
- OperationContextNoop txn;
- string keyName = "_id";
- vector<BSONObj> splitKeys;
- genUniqueRandomSplitKeys(keyName, &splitKeys);
- ShardKeyPattern shardKeyPattern(BSON(keyName << 1));
- std::unique_ptr<CollatorInterface> defaultCollator;
-
- std::vector<BSONObj> shards{
- BSON(ShardType::name() << _shardId << ShardType::host()
- << ConnectionString(HostAndPort("hostFooBar:27017")).toString())};
-
- // Generate and save a set of chunks with metadata using a temporary ChunkManager.
-
- std::vector<BSONObj> chunks;
- auto future = launchAsync([&] {
- ChunkManager manager(_collName, shardKeyPattern, std::move(defaultCollator), false);
- auto status = manager.createFirstChunks(operationContext(), _shardId, &splitKeys, NULL);
- ASSERT_OK(status);
- });
-
- // Call the expect() one extra time since numChunks = numSplits + 1.
- for (int i = 0; i < static_cast<int>(splitKeys.size()) + 1; i++) {
- expectInsertOnConfigSaveChunkAndReturnOk(chunks);
+class ChunkManagerQueryTest : public ChunkManagerTestFixture {
+protected:
+ void runQueryTest(const BSONObj& shardKey,
+ std::unique_ptr<CollatorInterface> defaultCollator,
+ bool unique,
+ const std::vector<BSONObj>& splitPoints,
+ const BSONObj& query,
+ const BSONObj& queryCollation,
+ const std::set<ShardId> expectedShardIds) {
+ const ShardKeyPattern shardKeyPattern(shardKey);
+ auto chunkManager =
+ makeChunkManager(shardKeyPattern, std::move(defaultCollator), false, splitPoints);
+
+ std::set<ShardId> shardIds;
+ chunkManager->getShardIdsForQuery(operationContext(), query, queryCollation, &shardIds);
+
+ BSONArrayBuilder expectedBuilder;
+ for (const auto& shardId : expectedShardIds) {
+ expectedBuilder << shardId;
+ }
+
+ BSONArrayBuilder actualBuilder;
+ for (const auto& shardId : shardIds) {
+ actualBuilder << shardId;
+ }
+
+ ASSERT_BSONOBJ_EQ(expectedBuilder.arr(), actualBuilder.arr());
}
+};
- future.timed_get(kFutureTimeout);
+TEST_F(ChunkManagerQueryTest, EmptyQuerySingleShard) {
+ runQueryTest(BSON("a" << 1), nullptr, false, {}, BSONObj(), BSONObj(), {ShardId("0")});
+}
- // Test that a *new* ChunkManager correctly loads the chunks with *no prior info*.
+TEST_F(ChunkManagerQueryTest, EmptyQueryMultiShard) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSONObj(),
+ BSONObj(),
+ {ShardId("0"), ShardId("1"), ShardId("2"), ShardId("3")});
+}
- int numChunks = static_cast<int>(chunks.size());
- BSONObj firstChunk = chunks.back();
- ChunkVersion version = ChunkVersion::fromBSON(firstChunk, ChunkType::DEPRECATED_lastmod());
+TEST_F(ChunkManagerQueryTest, UniversalRangeMultiShard) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("b" << 1),
+ BSONObj(),
+ {ShardId("0"), ShardId("1"), ShardId("2"), ShardId("3")});
+}
- CollectionType collType;
- collType.setNs(NamespaceString{_collName});
- collType.setEpoch(version.epoch());
- collType.setUpdatedAt(jsTime());
- collType.setKeyPattern(BSON(keyName << 1));
- collType.setUnique(false);
- collType.setDropped(false);
-
- ChunkManager manager(&txn, collType);
- future = launchAsync([&] {
- manager.loadExistingRanges(operationContext(), nullptr);
-
- ASSERT_EQ(version.epoch(), manager.getVersion().epoch());
- ASSERT_EQ(numChunks - 1, manager.getVersion().minorVersion());
- ASSERT_EQ(numChunks, static_cast<int>(manager.getChunkMap().size()));
- });
- expectFindOnConfigSendBSONObjVector(chunks);
- expectFindOnConfigSendBSONObjVector(shards);
- future.timed_get(kFutureTimeout);
+TEST_F(ChunkManagerQueryTest, EqualityRangeSingleShard) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {},
+ BSON("a"
+ << "x"),
+ BSONObj(),
+ {ShardId("0")});
+}
- // Test that a *new* ChunkManager correctly loads modified chunks *given an old ChunkManager*.
-
- // Simulate modified chunks collection
- ChunkVersion laterVersion = ChunkVersion(2, 1, version.epoch());
- BSONObj oldChunk = chunks.front();
- BSONObjBuilder newChunk;
- newChunk.append("_id", oldChunk.getStringField("_id"));
- newChunk.append("ns", oldChunk.getStringField("ns"));
- newChunk.append("min", oldChunk.getObjectField("min"));
- newChunk.append("max", oldChunk.getObjectField("max"));
- newChunk.append("shard", oldChunk.getStringField("shard"));
- laterVersion.addToBSON(newChunk, ChunkType::DEPRECATED_lastmod());
- newChunk.append("lastmodEpoch", oldChunk.getField("lastmodEpoch").OID());
-
- // Make new manager load chunk diff
- future = launchAsync([&] {
- ChunkManager newManager(manager.getns(),
- manager.getShardKeyPattern(),
- manager.getDefaultCollator() ? manager.getDefaultCollator()->clone()
- : nullptr,
- manager.isUnique());
- newManager.loadExistingRanges(operationContext(), &manager);
-
- ASSERT_EQ(numChunks, static_cast<int>(manager.getChunkMap().size()));
- ASSERT_EQ(laterVersion.toString(), newManager.getVersion().toString());
- });
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunks.back(), newChunk.obj()});
-
- std::cout << "done";
- future.timed_get(kFutureTimeout);
- std::cout << "completely done";
+TEST_F(ChunkManagerQueryTest, EqualityRangeMultiShard) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a"
+ << "y"),
+ BSONObj(),
+ {ShardId("2")});
}
-/**
- * Tests that chunk metadata is created correctly when using ChunkManager to create chunks for the
- * first time. Creating chunks on multiple shards is not tested here since there are unresolved
- * race conditions there and probably should be avoided if at all possible.
- */
-TEST_F(ChunkManagerTests, FullTest) {
- string keyName = "_id";
- vector<BSONObj> splitKeys;
- genUniqueRandomSplitKeys(keyName, &splitKeys);
- ShardKeyPattern shardKeyPattern(BSON(keyName << 1));
- std::unique_ptr<CollatorInterface> defaultCollator;
-
- auto future = launchAsync([&] {
- ChunkManager manager(_collName, shardKeyPattern, std::move(defaultCollator), false);
- auto status = manager.createFirstChunks(operationContext(), _shardId, &splitKeys, NULL);
- ASSERT_OK(status);
- });
-
- // Check that config server receives chunks with the expected metadata.
- // Call expectInsertOnConfigCheckMetadataAndReturnOk one extra time since numChunks = numSplits
- // + 1
- set<int> minorVersions;
- OID epoch;
- for (auto it = splitKeys.begin(); it != splitKeys.end(); ++it) {
- expectInsertOnConfigCheckMetadataAndReturnOk(minorVersions, epoch);
- }
- expectInsertOnConfigCheckMetadataAndReturnOk(minorVersions, epoch);
- future.timed_get(kFutureTimeout);
+TEST_F(ChunkManagerQueryTest, SetRangeMultiShard) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ fromjson("{a:{$in:['u','y']}}"),
+ BSONObj(),
+ {ShardId("0"), ShardId("2")});
+}
+
+TEST_F(ChunkManagerQueryTest, GTRangeMultiShard) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a" << GT << "x"),
+ BSONObj(),
+ {ShardId("1"), ShardId("2"), ShardId("3")});
+}
+
+TEST_F(ChunkManagerQueryTest, GTERangeMultiShard) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a" << GTE << "x"),
+ BSONObj(),
+ {ShardId("1"), ShardId("2"), ShardId("3")});
+}
+
+TEST_F(ChunkManagerQueryTest, LTRangeMultiShard) {
+ // NOTE (SERVER-4791): It isn't actually necessary to return shard 2 because its lowest key is
+ // "y", which is excluded from the query
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a" << LT << "y"),
+ BSONObj(),
+ {ShardId("0"), ShardId("1"), ShardId("2")});
+}
+
+TEST_F(ChunkManagerQueryTest, LTERangeMultiShard) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a" << LTE << "y"),
+ BSONObj(),
+ {ShardId("0"), ShardId("1"), ShardId("2")});
+}
+
+TEST_F(ChunkManagerQueryTest, OrEqualities) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ fromjson("{$or:[{a:'u'},{a:'y'}]}"),
+ BSONObj(),
+ {ShardId("0"), ShardId("2")});
+}
+
+TEST_F(ChunkManagerQueryTest, OrEqualityInequality) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ fromjson("{$or:[{a:'u'},{a:{$gte:'y'}}]}"),
+ BSONObj(),
+ {ShardId("0"), ShardId("2"), ShardId("3")});
+}
+
+TEST_F(ChunkManagerQueryTest, OrEqualityInequalityUnhelpful) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ fromjson("{$or:[{a:'u'},{a:{$gte:'zz'}},{}]}"),
+ BSONObj(),
+ {ShardId("0"), ShardId("1"), ShardId("2"), ShardId("3")});
+}
+
+TEST_F(ChunkManagerQueryTest, UnsatisfiableRangeSingleShard) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {},
+ BSON("a" << GT << "x" << LT << "x"),
+ BSONObj(),
+ {ShardId("0")});
+}
+
+TEST_F(ChunkManagerQueryTest, UnsatisfiableRangeMultiShard) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a" << GT << "x" << LT << "x"),
+ BSONObj(),
+ {ShardId("0")});
+}
+
+TEST_F(ChunkManagerQueryTest, EqualityThenUnsatisfiable) {
+ runQueryTest(BSON("a" << 1 << "b" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a" << 1 << "b" << GT << 4 << LT << 4),
+ BSONObj(),
+ {ShardId("0")});
+}
+
+TEST_F(ChunkManagerQueryTest, InequalityThenUnsatisfiable) {
+ runQueryTest(BSON("a" << 1 << "b" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a" << GT << 1 << "b" << GT << 4 << LT << 4),
+ BSONObj(),
+ {ShardId("0")});
+}
+
+TEST_F(ChunkManagerQueryTest, OrEqualityUnsatisfiableInequality) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ fromjson("{$or:[{a:'x'},{a:{$gt:'u',$lt:'u'}},{a:{$gte:'y'}}]}"),
+ BSONObj(),
+ {ShardId("1"), ShardId("2"), ShardId("3")});
+}
+
+TEST_F(ChunkManagerQueryTest, InMultiShard) {
+ runQueryTest(BSON("a" << 1 << "b" << 1),
+ nullptr,
+ false,
+ {BSON("a" << 5 << "b" << 10), BSON("a" << 5 << "b" << 20)},
+ BSON("a" << BSON("$in" << BSON_ARRAY(0 << 5 << 10)) << "b"
+ << BSON("$in" << BSON_ARRAY(0 << 5 << 25))),
+ BSONObj(),
+ {ShardId("0"), ShardId("1"), ShardId("2")});
+}
+
+TEST_F(ChunkManagerQueryTest, CollationStringsMultiShard) {
+ runQueryTest(BSON("a" << 1),
+ nullptr,
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a"
+ << "y"),
+ BSON("locale"
+ << "mock_reverse_string"),
+ {ShardId("0"), ShardId("1"), ShardId("2"), ShardId("3")});
+}
+
+TEST_F(ChunkManagerQueryTest, DefaultCollationStringsMultiShard) {
+ runQueryTest(
+ BSON("a" << 1),
+ stdx::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString),
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a"
+ << "y"),
+ BSON("locale"
+ << "mock_reverse_string"),
+ {ShardId("0"), ShardId("1"), ShardId("2"), ShardId("3")});
+}
+
+TEST_F(ChunkManagerQueryTest, SimpleCollationStringsMultiShard) {
+ runQueryTest(
+ BSON("a" << 1),
+ stdx::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString),
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a"
+ << "y"),
+ BSON("locale"
+ << "simple"),
+ {ShardId("2")});
+}
+
+TEST_F(ChunkManagerQueryTest, CollationNumbersMultiShard) {
+ runQueryTest(
+ BSON("a" << 1),
+ stdx::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString),
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a" << 5),
+ BSON("locale"
+ << "mock_reverse_string"),
+ {ShardId("0")});
+}
+
+TEST_F(ChunkManagerQueryTest, DefaultCollationNumbersMultiShard) {
+ runQueryTest(
+ BSON("a" << 1),
+ stdx::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString),
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a" << 5),
+ BSONObj(),
+ {ShardId("0")});
+}
+
+TEST_F(ChunkManagerQueryTest, SimpleCollationNumbersMultiShard) {
+ runQueryTest(
+ BSON("a" << 1),
+ stdx::make_unique<CollatorInterfaceMock>(CollatorInterfaceMock::MockType::kReverseString),
+ false,
+ {BSON("a"
+ << "x"),
+ BSON("a"
+ << "y"),
+ BSON("a"
+ << "z")},
+ BSON("a" << 5),
+ BSON("locale"
+ << "simple"),
+ {ShardId("0")});
}
} // namespace
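
The fixture's makeChunkManager() above relies on the standard mock-network pattern from the sharding test fixtures: the load runs on a separate thread while the test thread plays the config server, answering the two find requests (chunks, then shards) that loadExistingRanges() issues. The core of that handshake, excerpted from the fixture:

    auto future = launchAsync([&manager] {
        ON_BLOCK_EXIT([&] { Client::destroy(); });
        Client::initThread("Test");
        auto opCtx = cc().makeOperationContext();
        manager->loadExistingRanges(opCtx.get(), nullptr);
    });
    expectFindOnConfigSendBSONObjVector(initialChunks);  // answers the config.chunks query
    expectFindOnConfigSendBSONObjVector(shards);         // answers the config.shards query
    future.timed_get(kFutureTimeout);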
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index c9a7bbaea0b..1f6323a891d 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -35,6 +35,8 @@
#include <vector>
#include "mongo/db/lasterror.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
@@ -194,7 +196,8 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
// Reload the chunk manager outside of the DBConfig's mutex so as to not block operations
// for different collections on the same database
tempChunkManager.reset(new ChunkManager(
- oldManager->getns(),
+ NamespaceString(oldManager->getns()),
+ oldManager->getVersion().epoch(),
oldManager->getShardKeyPattern(),
oldManager->getDefaultCollator() ? oldManager->getDefaultCollator()->clone() : nullptr,
oldManager->isUnique()));
@@ -321,8 +324,25 @@ bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
_collections.erase(coll.getNs().ns());
if (!coll.getDropped()) {
+ std::unique_ptr<CollatorInterface> defaultCollator;
+ if (!coll.getDefaultCollation().isEmpty()) {
+ auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
+ ->makeFromBSON(coll.getDefaultCollation());
+
+ // The collation was validated upon collection creation.
+ invariantOK(statusWithCollator.getStatus());
+
+ defaultCollator = std::move(statusWithCollator.getValue());
+ }
+
+ std::unique_ptr<ChunkManager> manager(
+ stdx::make_unique<ChunkManager>(coll.getNs(),
+ coll.getEpoch(),
+ ShardKeyPattern(coll.getKeyPattern()),
+ std::move(defaultCollator),
+ coll.getUnique()));
+
// Do the blocking collection load
- std::unique_ptr<ChunkManager> manager(stdx::make_unique<ChunkManager>(txn, coll));
manager->loadExistingRanges(txn, nullptr);
// Collections with no chunks are unsharded, no matter what the collections entry says