author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-12-07 12:12:31 -0500
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-12-08 14:52:55 -0500
commit    3e8068a3df77ce9574c396774bde2d51333802d4 (patch)
tree      c56ac709e97279f4022b7917bf75bb68149bcbfe
parent    bc1e45dc19c488eb27dd93aaff33ad0145e420ab (diff)
SERVER-27321 Rename ScopedChunkManager::getExisting to refreshAndGet
Also replace usages of direct cache retrieval with invocations of the scoped retrieval methods. There are no functional changes; this is only cleanup on the way to removing DBConfig::reload, one of the refresh methods that can block access to the entire database while only a partial reload is happening.
-rw-r--r-- src/mongo/db/s/balancer/balancer.cpp | 4
-rw-r--r-- src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp | 8
-rw-r--r-- src/mongo/db/s/balancer/migration_manager.cpp | 6
-rw-r--r-- src/mongo/s/chunk.cpp | 2
-rw-r--r-- src/mongo/s/commands/cluster_merge_chunks_cmd.cpp | 64
-rw-r--r-- src/mongo/s/commands/cluster_move_chunk_cmd.cpp | 64
-rw-r--r-- src/mongo/s/commands/cluster_move_primary_cmd.cpp | 7
-rw-r--r-- src/mongo/s/commands/cluster_split_cmd.cpp | 104
-rw-r--r-- src/mongo/s/config.cpp | 6
-rw-r--r-- src/mongo/s/sharding_raii.cpp | 4
-rw-r--r-- src/mongo/s/sharding_raii.h | 11
11 files changed, 108 insertions, 172 deletions
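
Before the per-file hunks, it may help to see the shape of the cleanup in one place. A minimal before/after sketch, assembled from the call sites below (only names that appear in this diff; the surrounding command boilerplate is elided):

    // Before: fetch the DBConfig by hand, verify the collection is sharded,
    // then pull the chunk manager out of the cache, refreshing if stale.
    auto status = grid.catalogCache()->getDatabase(txn, nss.db().toString());
    if (!status.isOK()) {
        return appendCommandStatus(result, status.getStatus());
    }
    std::shared_ptr<DBConfig> config = status.getValue();
    if (!config->isSharded(nss.ns())) {
        return appendCommandStatus(
            result,
            Status(ErrorCodes::NamespaceNotSharded, "ns [" + nss.ns() + " is not sharded."));
    }
    std::shared_ptr<ChunkManager> manager =
        config->getChunkManagerIfExists(txn, nss.ns(), true);

    // After: a single scoped call that loads or incrementally refreshes the
    // routing metadata, and fails cleanly if the collection is not sharded.
    auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
    auto const cm = scopedCM.cm();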
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index c90ecb53aae..6a9795646ff 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -549,7 +549,7 @@ Status Balancer::_enforceTagRanges(OperationContext* txn) {
}
for (const auto& splitInfo : chunksToSplitStatus.getValue()) {
- auto scopedCMStatus = ScopedChunkManager::getExisting(txn, splitInfo.nss);
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, splitInfo.nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -630,7 +630,7 @@ int Balancer::_moveChunks(OperationContext* txn,
void Balancer::_splitOrMarkJumbo(OperationContext* txn,
const NamespaceString& nss,
const BSONObj& minKey) {
- auto scopedChunkManager = uassertStatusOK(ScopedChunkManager::getExisting(txn, nss));
+ auto scopedChunkManager = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
ChunkManager* const chunkManager = scopedChunkManager.cm();
auto chunk = chunkManager->findIntersectingChunkWithSimpleCollation(txn, minKey);
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index ef688f73dab..487cd76739e 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -295,7 +295,7 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* tx
const NamespaceString nss(chunk.getNS());
- auto scopedCMStatus = ScopedChunkManager::getExisting(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -325,7 +325,7 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* txn,
const NamespaceString nss(chunk.getNS());
- auto scopedCMStatus = ScopedChunkManager::getExisting(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -358,7 +358,7 @@ Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* txn,
StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidatesForCollection(
OperationContext* txn, const NamespaceString& nss, const ShardStatisticsVector& shardStats) {
- auto scopedCMStatus = ScopedChunkManager::getExisting(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -412,7 +412,7 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
const NamespaceString& nss,
const ShardStatisticsVector& shardStats,
bool aggressiveBalanceHint) {
- auto scopedCMStatus = ScopedChunkManager::getExisting(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index ce81d189d2a..bd8465338f0 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -252,7 +252,7 @@ Status MigrationManager::executeManualMigration(
waitForDelete)
->get();
- auto scopedCMStatus = ScopedChunkManager::getExisting(txn, NamespaceString(migrateInfo.ns));
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, NamespaceString(migrateInfo.ns));
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -395,7 +395,7 @@ void MigrationManager::finishRecovery(OperationContext* txn,
auto& migrateInfos = nssAndMigrateInfos.second;
invariant(!migrateInfos.empty());
- auto scopedCMStatus = ScopedChunkManager::getExisting(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, nss);
if (!scopedCMStatus.isOK()) {
// This shouldn't happen because the collection was intact and sharded when the previous
// config primary was active and the dist locks have been held by the balancer
@@ -529,7 +529,7 @@ shared_ptr<Notification<RemoteCommandResponse>> MigrationManager::_schedule(
// Sanity checks that the chunk being migrated is actually valid. These will be repeated at the
// shard as well, but doing them here saves an extra network call, which might otherwise fail.
- auto statusWithScopedChunkManager = ScopedChunkManager::getExisting(txn, nss);
+ auto statusWithScopedChunkManager = ScopedChunkManager::refreshAndGet(txn, nss);
if (!statusWithScopedChunkManager.isOK()) {
return std::make_shared<Notification<RemoteCommandResponse>>(
std::move(statusWithScopedChunkManager.getStatus()));
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index f636e4fbad9..cc1750ff074 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -407,7 +407,7 @@ bool Chunk::splitIfShould(OperationContext* txn, long dataWritten) {
// We need to use the latest chunk manager (after the split) in order to have the most
// up-to-date view of the chunk we are about to move
- auto scopedCM = uassertStatusOK(ScopedChunkManager::getExisting(txn, nss));
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
auto suggestedChunk = scopedCM.cm()->findIntersectingChunkWithSimpleCollation(
txn, suggestedMigrateChunk->getMin());
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index 44f8e74ccf2..26dde5698c9 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2015 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -17,13 +17,13 @@
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
*/
#include "mongo/platform/basic.h"
@@ -42,6 +42,7 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
+#include "mongo/s/sharding_raii.h"
namespace mongo {
@@ -105,9 +106,8 @@ public:
string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss(parseNs(dbname, cmdObj));
- uassert(ErrorCodes::InvalidNamespace,
- str::stream() << nss.ns() << " is not a valid namespace",
- nss.isValid());
+
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
vector<BSONObj> bounds;
if (!FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg)) {
@@ -137,47 +137,28 @@ public:
return false;
}
- auto status = grid.catalogCache()->getDatabase(txn, nss.db().toString());
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
-
- std::shared_ptr<DBConfig> config = status.getValue();
- if (!config->isSharded(nss.ns())) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::NamespaceNotSharded, "ns [" + nss.ns() + " is not sharded."));
- }
-
- // This refreshes the chunk metadata if stale.
- shared_ptr<ChunkManager> manager = config->getChunkManagerIfExists(txn, nss.ns(), true);
- if (!manager) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::NamespaceNotSharded, "ns [" + nss.ns() + " is not sharded."));
- }
+ auto const cm = scopedCM.cm();
- if (!manager->getShardKeyPattern().isShardKey(minKey) ||
- !manager->getShardKeyPattern().isShardKey(maxKey)) {
+ if (!cm->getShardKeyPattern().isShardKey(minKey) ||
+ !cm->getShardKeyPattern().isShardKey(maxKey)) {
errmsg = stream() << "shard key bounds "
<< "[" << minKey << "," << maxKey << ")"
<< " are not valid for shard key pattern "
- << manager->getShardKeyPattern().toBSON();
+ << cm->getShardKeyPattern().toBSON();
return false;
}
- minKey = manager->getShardKeyPattern().normalizeShardKey(minKey);
- maxKey = manager->getShardKeyPattern().normalizeShardKey(maxKey);
+ minKey = cm->getShardKeyPattern().normalizeShardKey(minKey);
+ maxKey = cm->getShardKeyPattern().normalizeShardKey(maxKey);
- shared_ptr<Chunk> firstChunk =
- manager->findIntersectingChunkWithSimpleCollation(txn, minKey);
- verify(firstChunk);
+ shared_ptr<Chunk> firstChunk = cm->findIntersectingChunkWithSimpleCollation(txn, minKey);
BSONObjBuilder remoteCmdObjB;
remoteCmdObjB.append(cmdObj[ClusterMergeChunksCommand::nsField()]);
remoteCmdObjB.append(cmdObj[ClusterMergeChunksCommand::boundsField()]);
- remoteCmdObjB.append(ClusterMergeChunksCommand::configField(),
- grid.shardRegistry()->getConfigServerConnectionString().toString());
+ remoteCmdObjB.append(
+ ClusterMergeChunksCommand::configField(),
+ Grid::get(txn)->shardRegistry()->getConfigServerConnectionString().toString());
remoteCmdObjB.append(ClusterMergeChunksCommand::shardNameField(), firstChunk->getShardId());
@@ -185,7 +166,8 @@ public:
// Throws, but handled at level above. Don't want to rewrap to preserve exception
// formatting.
- const auto shardStatus = grid.shardRegistry()->getShard(txn, firstChunk->getShardId());
+ const auto shardStatus =
+ Grid::get(txn)->shardRegistry()->getShard(txn, firstChunk->getShardId());
if (!shardStatus.isOK()) {
return appendCommandStatus(
result,
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index 1040669d275..4404d9663a0 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -53,7 +53,6 @@
namespace mongo {
using std::shared_ptr;
-using std::unique_ptr;
using std::string;
namespace {
@@ -108,26 +107,7 @@ public:
const NamespaceString nss(parseNs(dbname, cmdObj));
- std::shared_ptr<DBConfig> config;
-
- {
- auto status = grid.catalogCache()->getDatabase(txn, nss.db().toString());
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
-
- config = status.getValue();
- }
-
- if (!config->isSharded(nss.ns())) {
- config->reload(txn);
-
- if (!config->isSharded(nss.ns())) {
- return appendCommandStatus(result,
- Status(ErrorCodes::NamespaceNotSharded,
- "ns [" + nss.ns() + " is not sharded."));
- }
- }
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
const string toString = cmdObj["to"].valuestrsafe();
if (!toString.size()) {
@@ -135,7 +115,7 @@ public:
return false;
}
- const auto toStatus = grid.shardRegistry()->getShard(txn, toString);
+ const auto toStatus = Grid::get(txn)->shardRegistry()->getShard(txn, toString);
if (!toStatus.isOK()) {
string msg(str::stream() << "Could not move chunk in '" << nss.ns() << "' to shard '"
<< toString
@@ -143,6 +123,7 @@ public:
log() << msg;
return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
}
+
const auto to = toStatus.getValue();
// so far, chunk size serves test purposes; it may or may not become a supported parameter
@@ -160,43 +141,35 @@ public:
return false;
}
- // This refreshes the chunk metadata if stale
- auto scopedCM = uassertStatusOK(ScopedChunkManager::getExisting(txn, nss));
- ChunkManager* const info = scopedCM.cm();
+ auto const cm = scopedCM.cm();
shared_ptr<Chunk> chunk;
if (!find.isEmpty()) {
- StatusWith<BSONObj> status =
- info->getShardKeyPattern().extractShardKeyFromQuery(txn, find);
-
- // Bad query
- if (!status.isOK())
- return appendCommandStatus(result, status.getStatus());
-
- BSONObj shardKey = status.getValue();
-
+ // find
+ BSONObj shardKey =
+ uassertStatusOK(cm->getShardKeyPattern().extractShardKeyFromQuery(txn, find));
if (shardKey.isEmpty()) {
errmsg = str::stream() << "no shard key found in chunk query " << find;
return false;
}
- chunk = info->findIntersectingChunkWithSimpleCollation(txn, shardKey);
+ chunk = cm->findIntersectingChunkWithSimpleCollation(txn, shardKey);
} else {
- // Bounds
- if (!info->getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
- !info->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
+ // bounds
+ if (!cm->getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
+ !cm->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
errmsg = str::stream() << "shard key bounds "
<< "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
<< " are not valid for shard key pattern "
- << info->getShardKeyPattern().toBSON();
+ << cm->getShardKeyPattern().toBSON();
return false;
}
- BSONObj minKey = info->getShardKeyPattern().normalizeShardKey(bounds[0].Obj());
- BSONObj maxKey = info->getShardKeyPattern().normalizeShardKey(bounds[1].Obj());
+ BSONObj minKey = cm->getShardKeyPattern().normalizeShardKey(bounds[0].Obj());
+ BSONObj maxKey = cm->getShardKeyPattern().normalizeShardKey(bounds[1].Obj());
- chunk = info->findIntersectingChunkWithSimpleCollation(txn, minKey);
+ chunk = cm->findIntersectingChunkWithSimpleCollation(txn, minKey);
if (chunk->getMin().woCompare(minKey) != 0 || chunk->getMax().woCompare(maxKey) != 0) {
errmsg = str::stream() << "no chunk found with the shard key bounds "
@@ -213,7 +186,7 @@ public:
chunkType.setMin(chunk->getMin());
chunkType.setMax(chunk->getMax());
chunkType.setShard(chunk->getShardId());
- chunkType.setVersion(info->getVersion());
+ chunkType.setVersion(cm->getVersion());
uassertStatusOK(configsvr_client::moveChunk(txn,
chunkType,
@@ -222,8 +195,9 @@ public:
secondaryThrottle,
cmdObj["_waitForDelete"].trueValue()));
- // Make sure the chunk manager is updated with the migrated chunk
- info->reload(txn);
+ // Proactively refresh the chunk manager. Not strictly necessary, but this way it's
+ // immediately up-to-date the next time it's used.
+ cm->reload(txn);
result.append("millis", t.millis());
return true;
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index 4bbfd3531c7..63d94120343 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -115,7 +115,7 @@ public:
}
// Flush all cached information. This can't be perfect, but it's better than nothing.
- grid.catalogCache()->invalidate(dbname);
+ Grid::get(txn)->catalogCache()->invalidate(dbname);
auto status = grid.catalogCache()->getDatabase(txn, dbname);
if (!status.isOK()) {
@@ -209,6 +209,7 @@ public:
errmsg = "clone failed";
return false;
}
+
bool hasWCError = false;
if (auto wcErrorElem = cloneRes["writeConcernError"]) {
appendWriteConcernErrorToCmdResponse(toShard->getId(), wcErrorElem, result);
@@ -220,7 +221,9 @@ public:
ScopedDbConnection fromconn(fromShard->getConnString());
config->setPrimary(txn, toShard->getId());
- config->reload(txn);
+
+ // Ensure the next attempt to retrieve the database will do a full reload
+ Grid::get(txn)->catalogCache()->invalidate(dbname);
if (shardedColls.empty()) {
// TODO: Collections can be created in the meantime, and we should handle in the future.
diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp
index 63b55a8b348..489b256c696 100644
--- a/src/mongo/s/commands/cluster_split_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_cmd.cpp
@@ -46,6 +46,7 @@
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_util.h"
+#include "mongo/s/sharding_raii.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -103,25 +104,8 @@ public:
std::string& errmsg,
BSONObjBuilder& result) {
const NamespaceString nss(parseNs(dbname, cmdObj));
- uassert(ErrorCodes::InvalidNamespace,
- str::stream() << nss.ns() << " is not a valid namespace",
- nss.isValid());
- auto status = grid.catalogCache()->getDatabase(txn, nss.db().toString());
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
-
- std::shared_ptr<DBConfig> config = status.getValue();
- if (!config->isSharded(nss.ns())) {
- config->reload(txn);
-
- if (!config->isSharded(nss.ns())) {
- return appendCommandStatus(result,
- Status(ErrorCodes::NamespaceNotSharded,
- "ns [" + nss.ns() + " is not sharded."));
- }
- }
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
const BSONField<BSONObj> findField("find", BSONObj());
const BSONField<BSONArray> boundsField("bounds", BSONArray());
@@ -176,79 +160,67 @@ public:
return false;
}
- // This refreshes the chunk metadata if stale.
- shared_ptr<ChunkManager> info = config->getChunkManager(txn, nss.ns(), true);
+ auto const cm = scopedCM.cm();
+
shared_ptr<Chunk> chunk;
if (!find.isEmpty()) {
- StatusWith<BSONObj> status =
- info->getShardKeyPattern().extractShardKeyFromQuery(txn, find);
-
- // Bad query
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
-
- BSONObj shardKey = status.getValue();
+ // find
+ BSONObj shardKey =
+ uassertStatusOK(cm->getShardKeyPattern().extractShardKeyFromQuery(txn, find));
if (shardKey.isEmpty()) {
errmsg = stream() << "no shard key found in chunk query " << find;
return false;
}
- chunk = info->findIntersectingChunkWithSimpleCollation(txn, shardKey);
+ chunk = cm->findIntersectingChunkWithSimpleCollation(txn, shardKey);
} else if (!bounds.isEmpty()) {
- if (!info->getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
- !info->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
- errmsg = stream() << "shard key bounds "
- << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
- << " are not valid for shard key pattern "
- << info->getShardKeyPattern().toBSON();
+ // bounds
+ if (!cm->getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
+ !cm->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
+ errmsg = str::stream() << "shard key bounds "
+ << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
+ << " are not valid for shard key pattern "
+ << cm->getShardKeyPattern().toBSON();
return false;
}
- BSONObj minKey = info->getShardKeyPattern().normalizeShardKey(bounds[0].Obj());
- BSONObj maxKey = info->getShardKeyPattern().normalizeShardKey(bounds[1].Obj());
+ BSONObj minKey = cm->getShardKeyPattern().normalizeShardKey(bounds[0].Obj());
+ BSONObj maxKey = cm->getShardKeyPattern().normalizeShardKey(bounds[1].Obj());
- chunk = info->findIntersectingChunkWithSimpleCollation(txn, minKey);
- invariant(chunk.get());
+ chunk = cm->findIntersectingChunkWithSimpleCollation(txn, minKey);
if (chunk->getMin().woCompare(minKey) != 0 || chunk->getMax().woCompare(maxKey) != 0) {
- errmsg = stream() << "no chunk found with the shard key bounds "
- << "[" << minKey << "," << maxKey << ")";
+ errmsg = str::stream() << "no chunk found with the shard key bounds "
+ << ChunkRange(minKey, maxKey).toString();
return false;
}
} else {
- // Middle
- if (!info->getShardKeyPattern().isShardKey(middle)) {
- errmsg = stream() << "new split key " << middle
- << " is not valid for shard key pattern "
- << info->getShardKeyPattern().toBSON();
+ // middle
+ if (!cm->getShardKeyPattern().isShardKey(middle)) {
+ errmsg = str::stream() << "new split key " << middle
+ << " is not valid for shard key pattern "
+ << cm->getShardKeyPattern().toBSON();
return false;
}
- middle = info->getShardKeyPattern().normalizeShardKey(middle);
+ middle = cm->getShardKeyPattern().normalizeShardKey(middle);
// Check shard key size when manually provided
- Status status = ShardKeyPattern::checkShardKeySize(middle);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ uassertStatusOK(ShardKeyPattern::checkShardKeySize(middle));
- chunk = info->findIntersectingChunkWithSimpleCollation(txn, middle);
- invariant(chunk.get());
+ chunk = cm->findIntersectingChunkWithSimpleCollation(txn, middle);
if (chunk->getMin().woCompare(middle) == 0 || chunk->getMax().woCompare(middle) == 0) {
- errmsg = stream() << "new split key " << middle
- << " is a boundary key of existing chunk "
- << "[" << chunk->getMin() << "," << chunk->getMax() << ")";
+ errmsg = str::stream() << "new split key " << middle
+ << " is a boundary key of existing chunk "
+ << "[" << chunk->getMin() << "," << chunk->getMax() << ")";
return false;
}
}
- invariant(chunk.get());
-
- log() << "splitting chunk [" << redact(chunk->getMin().toString()) << ","
- << redact(chunk->getMax().toString()) << ")"
+ log() << "Splitting chunk "
+ << redact(ChunkRange(chunk->getMin(), chunk->getMax()).toString())
<< " in collection " << nss.ns() << " on shard " << chunk->getShardId();
BSONObj res;
@@ -258,22 +230,22 @@ public:
uassertStatusOK(shardutil::splitChunkAtMultiplePoints(txn,
chunk->getShardId(),
nss,
- info->getShardKeyPattern(),
- info->getVersion(),
+ cm->getShardKeyPattern(),
+ cm->getVersion(),
chunk->getMin(),
chunk->getMax(),
chunk->getLastmod(),
{middle}));
}
- // Proactively refresh the chunk manager. Not really necessary, but this way it's
+ // Proactively refresh the chunk manager. Not strictly necessary, but this way it's
// immediately up-to-date the next time it's used.
- info->reload(txn);
+ cm->reload(txn);
return true;
}
-} splitCollectionCmd;
+} splitChunk;
} // namespace
} // namespace mongo
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index e8b5593f00a..1932bc4803f 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -286,8 +286,10 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
_loadIfNeeded(txn, currentReloadIteration);
}
- CollectionInfo& ci = _collections[ns];
- uassert(10181, str::stream() << "not sharded:" << ns, ci.isSharded());
+ const CollectionInfo& ci = _collections[ns];
+ uassert(ErrorCodes::NamespaceNotSharded,
+ str::stream() << "Collection is not sharded: " << ns,
+ ci.isSharded());
invariant(!ci.key().isEmpty());
diff --git a/src/mongo/s/sharding_raii.cpp b/src/mongo/s/sharding_raii.cpp
index 7902657b506..87f1127257d 100644
--- a/src/mongo/s/sharding_raii.cpp
+++ b/src/mongo/s/sharding_raii.cpp
@@ -83,8 +83,8 @@ ScopedChunkManager::ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<C
ScopedChunkManager::~ScopedChunkManager() = default;
-StatusWith<ScopedChunkManager> ScopedChunkManager::getExisting(OperationContext* txn,
- const NamespaceString& nss) {
+StatusWith<ScopedChunkManager> ScopedChunkManager::refreshAndGet(OperationContext* txn,
+ const NamespaceString& nss) {
auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, nss.db());
if (!scopedDbStatus.isOK()) {
return scopedDbStatus.getStatus();
diff --git a/src/mongo/s/sharding_raii.h b/src/mongo/s/sharding_raii.h
index a8ec79cc328..725c8b86ffe 100644
--- a/src/mongo/s/sharding_raii.h
+++ b/src/mongo/s/sharding_raii.h
@@ -90,11 +90,14 @@ public:
~ScopedChunkManager();
/**
- * Ensures that the specified database and collection both exist in the cache and if so, returns
- * it. Otherwise, if it does not exist or any other error occurs, passes that error back.
+ * If the specified database and collection do not exist in the cache, tries to load them from
+ * the config server and returns a reference. If they are already in the cache, makes a call to
+ * the config server to check if there are any incremental updates to the collection chunk
+ * metadata and if so incorporates those. Otherwise, if it does not exist or any other error
+ * occurs, passes that error back.
*/
- static StatusWith<ScopedChunkManager> getExisting(OperationContext* txn,
- const NamespaceString& nss);
+ static StatusWith<ScopedChunkManager> refreshAndGet(OperationContext* txn,
+ const NamespaceString& nss);
/**
* Returns the underlying database for which we hold reference.
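
Call sites that cannot throw (the balancer and migration manager hunks above) consume the same method through its StatusWith return rather than uassertStatusOK. A sketch of that second style, assuming the usual StatusWith accessors:

    auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, nss);
    if (!scopedCMStatus.isOK()) {
        // Propagate the failure (e.g. NamespaceNotSharded) to the caller.
        return scopedCMStatus.getStatus();
    }
    auto scopedCM = std::move(scopedCMStatus.getValue());
    ChunkManager* const cm = scopedCM.cm();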