author     Kevin Pulo <kevin.pulo@mongodb.com>   2017-11-29 03:54:49 +0000
committer  Kevin Pulo <kevin.pulo@mongodb.com>   2017-12-18 05:47:06 +0000
commit     8a9090c4c59a840639f37c509fd0f2c6af252117 (patch)
tree       bc129d2c254438a6a971eb246fa4b6246310096d /src
parent     8467708af7fa83f8827362b80f56dab4aad30a41 (diff)
download   mongo-8a9090c4c59a840639f37c509fd0f2c6af252117.tar.gz
SERVER-18138 catalog manager listing methods now return vectors
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp       35
-rw-r--r--  src/mongo/db/s/config/configsvr_remove_shard_command.cpp               14
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client.h                           38
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp                    68
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.h                      34
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.cpp                    32
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.h                      34
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager.h                           5
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp    11
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_test.cpp                          108
-rw-r--r--  src/mongo/s/catalog_cache.cpp                                            5
-rw-r--r--  src/mongo/s/commands/cluster_commands_helpers.cpp                        5
-rw-r--r--  src/mongo/s/config_server_catalog_cache_loader.cpp                       4
13 files changed, 171 insertions(+), 222 deletions(-)
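
The change is the same across every file in the stat above: listing methods that used to fill a caller-supplied vector through an out parameter and signal errors with a bare Status now return StatusWith<std::vector<T>>, so the value and the error travel in one object and the error-path clear() of the out parameter goes away. A minimal sketch of the before/after shape, using simplified stand-in types rather than the real mongo::Status and mongo::StatusWith, and hypothetical listDatabasesOld/listDatabasesNew functions:

// Sketch only: Status and StatusWith here are simplified stand-ins, not the real
// MongoDB types, and the listDatabases* functions are invented for illustration.
#include <cassert>
#include <string>
#include <utility>
#include <vector>

struct Status {
    bool ok;
    std::string reason;
};

template <typename T>
class StatusWith {
public:
    StatusWith(Status status) : _status(std::move(status)) {}             // error case
    StatusWith(T value) : _status{true, ""}, _value(std::move(value)) {}  // success case

    bool isOK() const { return _status.ok; }
    const Status& getStatus() const { return _status; }
    const T& getValue() const { assert(isOK()); return _value; }

private:
    Status _status;
    T _value{};
};

// Before: the result comes back through an out parameter, the error through the return.
Status listDatabasesOld(std::vector<std::string>* dbs) {
    dbs->push_back("config");
    dbs->push_back("test");
    return Status{true, ""};
}

// After: value and error travel in a single StatusWith return.
StatusWith<std::vector<std::string>> listDatabasesNew() {
    std::vector<std::string> dbs{"config", "test"};
    return dbs;  // implicitly wrapped as the success case
}

int main() {
    std::vector<std::string> dbs;
    if (Status s = listDatabasesOld(&dbs); !s.ok) return 1;

    const auto swDbs = listDatabasesNew();
    if (!swDbs.isOK()) return 1;
    return swDbs.getValue() == dbs ? 0 : 1;
}

The real StatusWith carries error codes and forbids getValue() on a failed result, but the ownership shift is the same: callers no longer pre-declare and pass a container.
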
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 3654fcdcd8d..2bf7db8dc9a 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -83,15 +83,15 @@ StatusWith<DistributionStatus> createCollectionDistributionStatus(
shardToChunksMap[chunkEntry->getShardId()].push_back(chunk);
}
- vector<TagsType> collectionTags;
- Status tagsStatus = Grid::get(opCtx)->catalogClient()->getTagsForCollection(
- opCtx, chunkMgr->getns(), &collectionTags);
- if (!tagsStatus.isOK()) {
- return {tagsStatus.code(),
+ const auto swCollectionTags =
+ Grid::get(opCtx)->catalogClient()->getTagsForCollection(opCtx, chunkMgr->getns());
+ if (!swCollectionTags.isOK()) {
+ return {swCollectionTags.getStatus().code(),
str::stream() << "Unable to load tags for collection " << chunkMgr->getns()
<< " due to "
- << tagsStatus.toString()};
+ << swCollectionTags.getStatus().toString()};
}
+ const auto& collectionTags = swCollectionTags.getValue();
DistributionStatus distribution(NamespaceString(chunkMgr->getns()),
std::move(shardToChunksMap));
@@ -191,14 +191,14 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToSpli
const auto shardStats = std::move(shardStatsStatus.getValue());
- vector<CollectionType> collections;
-
- Status collsStatus =
- Grid::get(opCtx)->catalogClient()->getCollections(opCtx, nullptr, &collections, nullptr);
- if (!collsStatus.isOK()) {
- return collsStatus;
+ const auto swCollections =
+ Grid::get(opCtx)->catalogClient()->getCollections(opCtx, nullptr, nullptr);
+ if (!swCollections.isOK()) {
+ return swCollections.getStatus();
}
+ const auto& collections = swCollections.getValue();
+
if (collections.empty()) {
return SplitInfoVector{};
}
@@ -243,14 +243,15 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::selectChunksToMo
return MigrateInfoVector{};
}
- vector<CollectionType> collections;
- Status collsStatus =
- Grid::get(opCtx)->catalogClient()->getCollections(opCtx, nullptr, &collections, nullptr);
- if (!collsStatus.isOK()) {
- return collsStatus;
+ const auto swCollections =
+ Grid::get(opCtx)->catalogClient()->getCollections(opCtx, nullptr, nullptr);
+ if (!swCollections.isOK()) {
+ return swCollections.getStatus();
}
+ const auto& collections = swCollections.getValue();
+
if (collections.empty()) {
return MigrateInfoVector{};
}
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
index ad4188d7647..d16fa7950c0 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
@@ -118,9 +118,8 @@ public:
const auto shardDrainingStatus =
uassertStatusOK(shardingCatalogManager->removeShard(opCtx, shard->getId()));
- std::vector<std::string> databases;
- uassertStatusOK(
- shardingCatalogManager->getDatabasesForShard(opCtx, shard->getId(), &databases));
+ std::vector<std::string> databases =
+ uassertStatusOK(shardingCatalogManager->getDatabasesForShard(opCtx, shard->getId()));
// Get BSONObj containing:
// 1) note about moving or dropping databases in a shard
@@ -149,19 +148,18 @@ public:
result.appendElements(dbInfo);
break;
case ShardDrainingStatus::ONGOING: {
- std::vector<ChunkType> chunks;
- Status status = Grid::get(opCtx)->catalogClient()->getChunks(
+ const auto swChunks = Grid::get(opCtx)->catalogClient()->getChunks(
opCtx,
BSON(ChunkType::shard(shard->getId().toString())),
BSONObj(),
boost::none, // return all
- &chunks,
nullptr,
repl::ReadConcernLevel::kMajorityReadConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ if (!swChunks.isOK()) {
+ return appendCommandStatus(result, swChunks.getStatus());
}
+ const auto& chunks = swChunks.getValue();
result.append("msg", "draining ongoing");
result.append("state", "ongoing");
result.append("remaining",
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index 0cc0181f072..b73ae3f38ff 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -148,17 +148,15 @@ public:
*
* @param dbName an optional database name. Must be nullptr or non-empty. If nullptr is
* specified, all collections on the system are returned.
- * @param collections variable to receive the set of collections.
* @param optime an out parameter that will contain the opTime of the config server.
* Can be null. Note that collections can be fetched in multiple batches and each batch
* can have a unique opTime. This opTime will be the one from the last batch.
*
- * Returns a !OK status if an error occurs.
+ * Returns the set of collections, or a !OK status if an error occurs.
*/
- virtual Status getCollections(OperationContext* opCtx,
- const std::string* dbName,
- std::vector<CollectionType>* collections,
- repl::OpTime* optime) = 0;
+ virtual StatusWith<std::vector<CollectionType>> getCollections(OperationContext* opCtx,
+ const std::string* dbName,
+ repl::OpTime* optime) = 0;
/**
* Drops the specified collection from the collection metadata store.
@@ -174,9 +172,8 @@ public:
*
* Returns a !OK status if an error occurs.
*/
- virtual Status getDatabasesForShard(OperationContext* opCtx,
- const ShardId& shardId,
- std::vector<std::string>* dbs) = 0;
+ virtual StatusWith<std::vector<std::string>> getDatabasesForShard(OperationContext* opCtx,
+ const ShardId& shardId) = 0;
/**
* Gets the requested number of chunks (of type ChunkType) that satisfy a query.
@@ -184,28 +181,27 @@ public:
* @param filter The query to filter out the results.
* @param sort Fields to use for sorting the results. Pass empty BSON object for no sort.
* @param limit The number of chunk entries to return. Pass boost::none for no limit.
- * @param chunks Vector entry to receive the results
* @param optime an out parameter that will contain the opTime of the config server.
* Can be null. Note that chunks can be fetched in multiple batches and each batch
* can have a unique opTime. This opTime will be the one from the last batch.
* @param readConcern The readConcern to use while querying for chunks.
*
- * Returns a !OK status if an error occurs.
+ * Returns a vector of ChunkTypes, or a !OK status if an error occurs.
*/
- virtual Status getChunks(OperationContext* opCtx,
- const BSONObj& filter,
- const BSONObj& sort,
- boost::optional<int> limit,
- std::vector<ChunkType>* chunks,
- repl::OpTime* opTime,
- repl::ReadConcernLevel readConcern) = 0;
+ virtual StatusWith<std::vector<ChunkType>> getChunks(OperationContext* opCtx,
+ const BSONObj& filter,
+ const BSONObj& sort,
+ boost::optional<int> limit,
+ repl::OpTime* opTime,
+ repl::ReadConcernLevel readConcern) = 0;
/**
* Retrieves all tags for the specified collection.
+ *
+ * Returns a !OK status if an error occurs.
*/
- virtual Status getTagsForCollection(OperationContext* opCtx,
- const std::string& collectionNs,
- std::vector<TagsType>* tags) = 0;
+ virtual StatusWith<std::vector<TagsType>> getTagsForCollection(
+ OperationContext* opCtx, const std::string& collectionNs) = 0;
/**
* Retrieves all shards in this sharded cluster.
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 10ef8131969..266d9556e89 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -376,10 +376,8 @@ StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientImpl::getColle
return repl::OpTimeWith<CollectionType>(collType, retOpTimePair.opTime);
}
-Status ShardingCatalogClientImpl::getCollections(OperationContext* opCtx,
- const std::string* dbName,
- std::vector<CollectionType>* collections,
- OpTime* opTime) {
+StatusWith<std::vector<CollectionType>> ShardingCatalogClientImpl::getCollections(
+ OperationContext* opCtx, const std::string* dbName, OpTime* opTime) {
BSONObjBuilder b;
if (dbName) {
invariant(!dbName->empty());
@@ -400,10 +398,10 @@ Status ShardingCatalogClientImpl::getCollections(OperationContext* opCtx,
const auto& docsOpTimePair = findStatus.getValue();
+ std::vector<CollectionType> collections;
for (const BSONObj& obj : docsOpTimePair.value) {
const auto collectionResult = CollectionType::fromBSON(obj);
if (!collectionResult.isOK()) {
- collections->clear();
return {ErrorCodes::FailedToParse,
str::stream() << "error while parsing " << CollectionType::ConfigNS
<< " document: "
@@ -412,14 +410,14 @@ Status ShardingCatalogClientImpl::getCollections(OperationContext* opCtx,
<< collectionResult.getStatus().toString()};
}
- collections->push_back(collectionResult.getValue());
+ collections.push_back(collectionResult.getValue());
}
if (opTime) {
*opTime = docsOpTimePair.opTime;
}
- return Status::OK();
+ return collections;
}
Status ShardingCatalogClientImpl::dropCollection(OperationContext* opCtx,
@@ -671,9 +669,8 @@ StatusWith<VersionType> ShardingCatalogClientImpl::getConfigVersion(
return versionTypeResult.getValue();
}
-Status ShardingCatalogClientImpl::getDatabasesForShard(OperationContext* opCtx,
- const ShardId& shardId,
- vector<string>* dbs) {
+StatusWith<std::vector<std::string>> ShardingCatalogClientImpl::getDatabasesForShard(
+ OperationContext* opCtx, const ShardId& shardId) {
auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
@@ -685,30 +682,29 @@ Status ShardingCatalogClientImpl::getDatabasesForShard(OperationContext* opCtx,
return findStatus.getStatus();
}
+ std::vector<std::string> dbs;
for (const BSONObj& obj : findStatus.getValue().value) {
string dbName;
Status status = bsonExtractStringField(obj, DatabaseType::name(), &dbName);
if (!status.isOK()) {
- dbs->clear();
return status;
}
- dbs->push_back(dbName);
+ dbs.push_back(dbName);
}
- return Status::OK();
+ return dbs;
}
-Status ShardingCatalogClientImpl::getChunks(OperationContext* opCtx,
- const BSONObj& query,
- const BSONObj& sort,
- boost::optional<int> limit,
- vector<ChunkType>* chunks,
- OpTime* opTime,
- repl::ReadConcernLevel readConcern) {
+StatusWith<std::vector<ChunkType>> ShardingCatalogClientImpl::getChunks(
+ OperationContext* opCtx,
+ const BSONObj& query,
+ const BSONObj& sort,
+ boost::optional<int> limit,
+ OpTime* opTime,
+ repl::ReadConcernLevel readConcern) {
invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer ||
readConcern == repl::ReadConcernLevel::kMajorityReadConcern);
- chunks->clear();
// Convert boost::optional<int> to boost::optional<long long>.
auto longLimit = limit ? boost::optional<long long>(*limit) : boost::none;
@@ -726,31 +722,29 @@ Status ShardingCatalogClientImpl::getChunks(OperationContext* opCtx,
}
const auto& chunkDocsOpTimePair = findStatus.getValue();
+
+ std::vector<ChunkType> chunks;
for (const BSONObj& obj : chunkDocsOpTimePair.value) {
auto chunkRes = ChunkType::fromConfigBSON(obj);
if (!chunkRes.isOK()) {
- chunks->clear();
return {chunkRes.getStatus().code(),
stream() << "Failed to parse chunk with id " << obj[ChunkType::name()]
<< " due to "
<< chunkRes.getStatus().reason()};
}
- chunks->push_back(chunkRes.getValue());
+ chunks.push_back(chunkRes.getValue());
}
if (opTime) {
*opTime = chunkDocsOpTimePair.opTime;
}
- return Status::OK();
+ return chunks;
}
-Status ShardingCatalogClientImpl::getTagsForCollection(OperationContext* opCtx,
- const std::string& collectionNs,
- std::vector<TagsType>* tags) {
- tags->clear();
-
+StatusWith<std::vector<TagsType>> ShardingCatalogClientImpl::getTagsForCollection(
+ OperationContext* opCtx, const std::string& collectionNs) {
auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
@@ -764,20 +758,21 @@ Status ShardingCatalogClientImpl::getTagsForCollection(OperationContext* opCtx,
}
const auto& tagDocsOpTimePair = findStatus.getValue();
+
+ std::vector<TagsType> tags;
for (const BSONObj& obj : tagDocsOpTimePair.value) {
auto tagRes = TagsType::fromBSON(obj);
if (!tagRes.isOK()) {
- tags->clear();
return {tagRes.getStatus().code(),
str::stream() << "Failed to parse tag with id " << obj[TagsType::tag()]
<< " due to "
<< tagRes.getStatus().toString()};
}
- tags->push_back(tagRes.getValue());
+ tags.push_back(tagRes.getValue());
}
- return Status::OK();
+ return tags;
}
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::getAllShards(
@@ -980,16 +975,15 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCt
// Look for the chunk in this shard whose version got bumped. We assume that if that
// mod made it to the config server, then applyOps was successful.
- std::vector<ChunkType> newestChunk;
BSONObjBuilder query;
lastChunkVersion.addToBSON(query, ChunkType::lastmod());
query.append(ChunkType::ns(), nss);
- Status chunkStatus =
- getChunks(opCtx, query.obj(), BSONObj(), 1, &newestChunk, nullptr, readConcern);
+ auto swChunks = getChunks(opCtx, query.obj(), BSONObj(), 1, nullptr, readConcern);
+ const auto& newestChunk = swChunks.getValue();
- if (!chunkStatus.isOK()) {
+ if (!swChunks.isOK()) {
errMsg = str::stream() << "getChunks function failed, unable to validate chunk "
- << "operation metadata: " << chunkStatus.toString()
+ << "operation metadata: " << swChunks.getStatus().toString()
<< ". applyChunkOpsDeprecated failed to get confirmation "
<< "of commit. Unable to save chunk ops. Command: " << cmd
<< ". Result: " << response.getValue().response;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index c11ec37f751..7934cce207f 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -94,28 +94,24 @@ public:
StatusWith<repl::OpTimeWith<CollectionType>> getCollection(OperationContext* opCtx,
const std::string& collNs) override;
- Status getCollections(OperationContext* opCtx,
- const std::string* dbName,
- std::vector<CollectionType>* collections,
- repl::OpTime* optime) override;
+ StatusWith<std::vector<CollectionType>> getCollections(OperationContext* opCtx,
+ const std::string* dbName,
+ repl::OpTime* optime) override;
Status dropCollection(OperationContext* opCtx, const NamespaceString& ns) override;
- Status getDatabasesForShard(OperationContext* opCtx,
- const ShardId& shardName,
- std::vector<std::string>* dbs) override;
-
- Status getChunks(OperationContext* opCtx,
- const BSONObj& query,
- const BSONObj& sort,
- boost::optional<int> limit,
- std::vector<ChunkType>* chunks,
- repl::OpTime* opTime,
- repl::ReadConcernLevel readConcern) override;
-
- Status getTagsForCollection(OperationContext* opCtx,
- const std::string& collectionNs,
- std::vector<TagsType>* tags) override;
+ StatusWith<std::vector<std::string>> getDatabasesForShard(OperationContext* opCtx,
+ const ShardId& shardName) override;
+
+ StatusWith<std::vector<ChunkType>> getChunks(OperationContext* opCtx,
+ const BSONObj& query,
+ const BSONObj& sort,
+ boost::optional<int> limit,
+ repl::OpTime* opTime,
+ repl::ReadConcernLevel readConcern) override;
+
+ StatusWith<std::vector<TagsType>> getTagsForCollection(
+ OperationContext* opCtx, const std::string& collectionNs) override;
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index 982e06e085f..8d99100c74c 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -32,10 +32,12 @@
#include "mongo/base/status.h"
#include "mongo/db/repl/optime.h"
+#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_config_version.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
+#include "mongo/s/catalog/type_tags.h"
#include "mongo/stdx/memory.h"
namespace mongo {
@@ -82,10 +84,8 @@ StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientMock::getColle
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::getCollections(OperationContext* opCtx,
- const string* dbName,
- vector<CollectionType>* collections,
- repl::OpTime* optime) {
+StatusWith<std::vector<CollectionType>> ShardingCatalogClientMock::getCollections(
+ OperationContext* opCtx, const string* dbName, repl::OpTime* optime) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
@@ -94,25 +94,23 @@ Status ShardingCatalogClientMock::dropCollection(OperationContext* opCtx,
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::getDatabasesForShard(OperationContext* opCtx,
- const ShardId& shardName,
- vector<string>* dbs) {
+StatusWith<std::vector<std::string>> ShardingCatalogClientMock::getDatabasesForShard(
+ OperationContext* opCtx, const ShardId& shardName) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::getChunks(OperationContext* opCtx,
- const BSONObj& filter,
- const BSONObj& sort,
- boost::optional<int> limit,
- std::vector<ChunkType>* chunks,
- repl::OpTime* opTime,
- repl::ReadConcernLevel readConcern) {
+StatusWith<std::vector<ChunkType>> ShardingCatalogClientMock::getChunks(
+ OperationContext* opCtx,
+ const BSONObj& filter,
+ const BSONObj& sort,
+ boost::optional<int> limit,
+ repl::OpTime* opTime,
+ repl::ReadConcernLevel readConcern) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::getTagsForCollection(OperationContext* opCtx,
- const string& collectionNs,
- vector<TagsType>* tags) {
+StatusWith<std::vector<TagsType>> ShardingCatalogClientMock::getTagsForCollection(
+ OperationContext* opCtx, const string& collectionNs) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 2b33aed396f..bba63ca3ef7 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -59,28 +59,24 @@ public:
StatusWith<repl::OpTimeWith<CollectionType>> getCollection(OperationContext* opCtx,
const std::string& collNs) override;
- Status getCollections(OperationContext* opCtx,
- const std::string* dbName,
- std::vector<CollectionType>* collections,
- repl::OpTime* optime) override;
+ StatusWith<std::vector<CollectionType>> getCollections(OperationContext* opCtx,
+ const std::string* dbName,
+ repl::OpTime* optime) override;
Status dropCollection(OperationContext* opCtx, const NamespaceString& ns) override;
- Status getDatabasesForShard(OperationContext* opCtx,
- const ShardId& shardName,
- std::vector<std::string>* dbs) override;
-
- Status getChunks(OperationContext* opCtx,
- const BSONObj& filter,
- const BSONObj& sort,
- boost::optional<int> limit,
- std::vector<ChunkType>* chunks,
- repl::OpTime* opTime,
- repl::ReadConcernLevel readConcern) override;
-
- Status getTagsForCollection(OperationContext* opCtx,
- const std::string& collectionNs,
- std::vector<TagsType>* tags) override;
+ StatusWith<std::vector<std::string>> getDatabasesForShard(OperationContext* opCtx,
+ const ShardId& shardName) override;
+
+ StatusWith<std::vector<ChunkType>> getChunks(OperationContext* opCtx,
+ const BSONObj& filter,
+ const BSONObj& sort,
+ boost::optional<int> limit,
+ repl::OpTime* opTime,
+ repl::ReadConcernLevel readConcern) override;
+
+ StatusWith<std::vector<TagsType>> getTagsForCollection(
+ OperationContext* opCtx, const std::string& collectionNs) override;
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
OperationContext* opCtx, repl::ReadConcernLevel readConcern) override;
diff --git a/src/mongo/s/catalog/sharding_catalog_manager.h b/src/mongo/s/catalog/sharding_catalog_manager.h
index cd36a69cbfe..4861a76c1b1 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager.h
+++ b/src/mongo/s/catalog/sharding_catalog_manager.h
@@ -215,9 +215,8 @@ public:
*
* Returns a !OK status if an error occurs.
*/
- Status getDatabasesForShard(OperationContext* opCtx,
- const ShardId& shardId,
- std::vector<std::string>* dbs);
+ StatusWith<std::vector<std::string>> getDatabasesForShard(OperationContext* opCtx,
+ const ShardId& shardId);
//
// Collection Operations
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp
index a852359f12f..e0c4574e510 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp
@@ -138,9 +138,8 @@ void ShardingCatalogManager::enableSharding(OperationContext* opCtx, const std::
uassertStatusOK(Grid::get(opCtx)->catalogClient()->updateDatabase(opCtx, dbName, dbType));
}
-Status ShardingCatalogManager::getDatabasesForShard(OperationContext* opCtx,
- const ShardId& shardId,
- std::vector<std::string>* dbs) {
+StatusWith<std::vector<std::string>> ShardingCatalogManager::getDatabasesForShard(
+ OperationContext* opCtx, const ShardId& shardId) {
auto findStatus = Grid::get(opCtx)->catalogClient()->_exhaustiveFindOnConfig(
opCtx,
kConfigReadSelector,
@@ -153,18 +152,18 @@ Status ShardingCatalogManager::getDatabasesForShard(OperationContext* opCtx,
if (!findStatus.isOK())
return findStatus.getStatus();
+ std::vector<std::string> dbs;
for (const BSONObj& obj : findStatus.getValue().value) {
std::string dbName;
Status status = bsonExtractStringField(obj, DatabaseType::name(), &dbName);
if (!status.isOK()) {
- dbs->clear();
return status;
}
- dbs->push_back(dbName);
+ dbs.push_back(dbName);
}
- return Status::OK();
+ return dbs;
}
} // namespace mongo
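
The implementation hunks above (getDatabasesForShard, getChunks, getTagsForCollection, getCollections) all move to the same loop shape: parse each config document into a local vector, return the parse error directly on failure, and return the vector on success, which is why the defensive clear() calls disappear. A compact sketch of that shape, using C++23 std::expected as a stand-in for StatusWith; Doc, parseName and namesForShard are made up for illustration:

// Sketch of the build-locally-and-return pattern; std::expected stands in for
// StatusWith, and Doc/parseName/namesForShard are hypothetical.
#include <expected>
#include <string>
#include <vector>

struct Doc {
    std::string name;  // pretend this came out of a BSON document
};

std::expected<std::string, std::string> parseName(const Doc& d) {
    if (d.name.empty())
        return std::unexpected("document has no name field");
    return d.name;
}

std::expected<std::vector<std::string>, std::string> namesForShard(const std::vector<Doc>& docs) {
    std::vector<std::string> names;
    for (const Doc& d : docs) {
        auto parsed = parseName(d);
        if (!parsed)
            return std::unexpected(parsed.error());  // early return; nothing to clear
        names.push_back(std::move(*parsed));
    }
    return names;  // success: the whole vector is handed back to the caller
}

int main() {
    const auto result = namesForShard({{"db1"}, {"db2"}});
    return result && result->size() == 2 ? 0 : 1;
}

Because the vector is a local, an early return simply discards it; there is no caller-owned container left in a half-filled state.
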
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index 88356d51049..970037bfdfa 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -377,16 +377,15 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
const OpTime newOpTime(Timestamp(7, 6), 5);
auto future = launchAsync([this, &chunksQuery, newOpTime] {
- vector<ChunkType> chunks;
OpTime opTime;
- ASSERT_OK(catalogClient()->getChunks(operationContext(),
- chunksQuery,
- BSON(ChunkType::lastmod() << -1),
- 1,
- &chunks,
- &opTime,
- repl::ReadConcernLevel::kMajorityReadConcern));
+ const auto chunks =
+ assertGet(catalogClient()->getChunks(operationContext(),
+ chunksQuery,
+ BSON(ChunkType::lastmod() << -1),
+ 1,
+ &opTime,
+ repl::ReadConcernLevel::kMajorityReadConcern));
ASSERT_EQ(2U, chunks.size());
ASSERT_EQ(newOpTime, opTime);
@@ -434,15 +433,13 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSNoSortNoLimit) {
<< BSON("$gte" << static_cast<long long>(queryChunkVersion.toLong()))));
auto future = launchAsync([this, &chunksQuery] {
- vector<ChunkType> chunks;
-
- ASSERT_OK(catalogClient()->getChunks(operationContext(),
- chunksQuery,
- BSONObj(),
- boost::none,
- &chunks,
- nullptr,
- repl::ReadConcernLevel::kMajorityReadConcern));
+ const auto chunks =
+ assertGet(catalogClient()->getChunks(operationContext(),
+ chunksQuery,
+ BSONObj(),
+ boost::none,
+ nullptr,
+ repl::ReadConcernLevel::kMajorityReadConcern));
ASSERT_EQ(0U, chunks.size());
return chunks;
@@ -481,17 +478,15 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSInvalidChunk) {
<< BSON("$gte" << static_cast<long long>(queryChunkVersion.toLong()))));
auto future = launchAsync([this, &chunksQuery] {
- vector<ChunkType> chunks;
- Status status = catalogClient()->getChunks(operationContext(),
- chunksQuery,
- BSONObj(),
- boost::none,
- &chunks,
- nullptr,
- repl::ReadConcernLevel::kMajorityReadConcern);
-
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, status);
- ASSERT_EQ(0U, chunks.size());
+ const auto swChunks =
+ catalogClient()->getChunks(operationContext(),
+ chunksQuery,
+ BSONObj(),
+ boost::none,
+ nullptr,
+ repl::ReadConcernLevel::kMajorityReadConcern);
+
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, swChunks.getStatus());
});
onFindCommand([&chunksQuery](const RemoteCommandRequest& request) {
@@ -797,16 +792,14 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
const OpTime newOpTime(Timestamp(7, 6), 5);
auto future = launchAsync([this, newOpTime] {
- vector<CollectionType> collections;
OpTime opTime;
- const auto status =
- catalogClient()->getCollections(operationContext(), nullptr, &collections, &opTime);
+ const auto& collections =
+ assertGet(catalogClient()->getCollections(operationContext(), nullptr, &opTime));
- ASSERT_OK(status);
ASSERT_EQ(newOpTime, opTime);
- return collections;
+ return std::move(collections);
});
onFindWithMetadataCommand(
@@ -859,13 +852,8 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsWithDb) {
auto future = launchAsync([this] {
string dbName = "test";
- vector<CollectionType> collections;
- const auto status =
- catalogClient()->getCollections(operationContext(), &dbName, &collections, nullptr);
-
- ASSERT_OK(status);
- return collections;
+ return assertGet(catalogClient()->getCollections(operationContext(), &dbName, nullptr));
});
onFindCommand([this, coll1, coll2](const RemoteCommandRequest& request) {
@@ -900,13 +888,11 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsInvalidCollectionType) {
auto future = launchAsync([this] {
string dbName = "test";
- vector<CollectionType> collections;
- const auto status =
- catalogClient()->getCollections(operationContext(), &dbName, &collections, nullptr);
+ const auto swCollections =
+ catalogClient()->getCollections(operationContext(), &dbName, nullptr);
- ASSERT_EQ(ErrorCodes::FailedToParse, status);
- ASSERT_EQ(0U, collections.size());
+ ASSERT_EQ(ErrorCodes::FailedToParse, swCollections.getStatus());
});
CollectionType validColl;
@@ -956,12 +942,8 @@ TEST_F(ShardingCatalogClientTest, GetDatabasesForShardValid) {
dbt2.setPrimary(ShardId("shard0000"));
auto future = launchAsync([this] {
- vector<string> dbs;
- const auto status =
- catalogClient()->getDatabasesForShard(operationContext(), ShardId("shard0000"), &dbs);
-
- ASSERT_OK(status);
- return dbs;
+ return assertGet(
+ catalogClient()->getDatabasesForShard(operationContext(), ShardId("shard0000")));
});
onFindCommand([this, dbt1, dbt2](const RemoteCommandRequest& request) {
@@ -993,12 +975,10 @@ TEST_F(ShardingCatalogClientTest, GetDatabasesForShardInvalidDoc) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
- vector<string> dbs;
- const auto status =
- catalogClient()->getDatabasesForShard(operationContext(), ShardId("shard0000"), &dbs);
+ const auto swDatabaseNames =
+ catalogClient()->getDatabasesForShard(operationContext(), ShardId("shard0000"));
- ASSERT_EQ(ErrorCodes::TypeMismatch, status);
- ASSERT_EQ(0U, dbs.size());
+ ASSERT_EQ(ErrorCodes::TypeMismatch, swDatabaseNames.getStatus());
});
onFindCommand([](const RemoteCommandRequest& request) {
@@ -1031,10 +1011,9 @@ TEST_F(ShardingCatalogClientTest, GetTagsForCollection) {
tagB.setMaxKey(BSON("a" << 300));
auto future = launchAsync([this] {
- vector<TagsType> tags;
+ const auto& tags =
+ assertGet(catalogClient()->getTagsForCollection(operationContext(), "TestDB.TestColl"));
- ASSERT_OK(
- catalogClient()->getTagsForCollection(operationContext(), "TestDB.TestColl", &tags));
ASSERT_EQ(2U, tags.size());
return tags;
@@ -1067,10 +1046,9 @@ TEST_F(ShardingCatalogClientTest, GetTagsForCollectionNoTags) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
- vector<TagsType> tags;
+ const auto& tags =
+ assertGet(catalogClient()->getTagsForCollection(operationContext(), "TestDB.TestColl"));
- ASSERT_OK(
- catalogClient()->getTagsForCollection(operationContext(), "TestDB.TestColl", &tags));
ASSERT_EQ(0U, tags.size());
return tags;
@@ -1085,12 +1063,10 @@ TEST_F(ShardingCatalogClientTest, GetTagsForCollectionInvalidTag) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
- vector<TagsType> tags;
- Status status =
- catalogClient()->getTagsForCollection(operationContext(), "TestDB.TestColl", &tags);
+ const auto swTags =
+ catalogClient()->getTagsForCollection(operationContext(), "TestDB.TestColl");
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, status);
- ASSERT_EQ(0U, tags.size());
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, swTags.getStatus());
});
onFindCommand([](const RemoteCommandRequest& request) {
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index d28c55cd573..dca7f532cb2 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -302,10 +302,9 @@ std::shared_ptr<CatalogCache::DatabaseInfoEntry> CatalogCache::_getDatabase(Oper
const auto& dbDesc = opTimeWithDb.value;
// Load the sharded collections entries
- std::vector<CollectionType> collections;
repl::OpTime collLoadConfigOptime;
- uassertStatusOK(
- catalogClient->getCollections(opCtx, &dbNameCopy, &collections, &collLoadConfigOptime));
+ const std::vector<CollectionType> collections =
+ uassertStatusOK(catalogClient->getCollections(opCtx, &dbNameCopy, &collLoadConfigOptime));
StringMap<CollectionRoutingInfoEntry> collectionEntries;
for (const auto& coll : collections) {
diff --git a/src/mongo/s/commands/cluster_commands_helpers.cpp b/src/mongo/s/commands/cluster_commands_helpers.cpp
index aea2fa2429f..72043c4d206 100644
--- a/src/mongo/s/commands/cluster_commands_helpers.cpp
+++ b/src/mongo/s/commands/cluster_commands_helpers.cpp
@@ -416,9 +416,8 @@ std::vector<NamespaceString> getAllShardedCollectionsForDb(OperationContext* opC
StringData dbName) {
const auto dbNameStr = dbName.toString();
- std::vector<CollectionType> collectionsOnConfig;
- uassertStatusOK(Grid::get(opCtx)->catalogClient()->getCollections(
- opCtx, &dbNameStr, &collectionsOnConfig, nullptr));
+ const std::vector<CollectionType> collectionsOnConfig = uassertStatusOK(
+ Grid::get(opCtx)->catalogClient()->getCollections(opCtx, &dbNameStr, nullptr));
std::vector<NamespaceString> collectionsToReturn;
for (const auto& coll : collectionsOnConfig) {
diff --git a/src/mongo/s/config_server_catalog_cache_loader.cpp b/src/mongo/s/config_server_catalog_cache_loader.cpp
index e5f79d68f73..5f46e72fb20 100644
--- a/src/mongo/s/config_server_catalog_cache_loader.cpp
+++ b/src/mongo/s/config_server_catalog_cache_loader.cpp
@@ -107,14 +107,12 @@ CollectionAndChangedChunks getChangedChunks(OperationContext* opCtx,
const auto diffQuery = createConfigDiffQuery(nss, startingCollectionVersion);
// Query the chunks which have changed
- std::vector<ChunkType> changedChunks;
repl::OpTime opTime;
- uassertStatusOK(
+ const std::vector<ChunkType> changedChunks = uassertStatusOK(
Grid::get(opCtx)->catalogClient()->getChunks(opCtx,
diffQuery.query,
diffQuery.sort,
boost::none,
- &changedChunks,
&opTime,
repl::ReadConcernLevel::kMajorityReadConcern));
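
One detail from the getChunks and getCollections signatures: the vector moves into the return value, but the config server opTime is still delivered through a nullable out pointer, so call sites that do not need it pass nullptr (as the balancer and config_server_catalog_cache_loader changes above do). A small sketch of that mixed shape, again with std::expected standing in for StatusWith, OpTime reduced to an integer, and fetchChunks as a hypothetical stand-in for getChunks:

// Sketch only: fetchChunks mirrors the shape of
// getChunks(opCtx, filter, sort, limit, opTime, readConcern), nothing more.
#include <expected>
#include <string>
#include <vector>

using OpTime = long long;

std::expected<std::vector<std::string>, std::string> fetchChunks(OpTime* opTime) {
    std::vector<std::string> chunks{"chunk-1", "chunk-2"};
    if (opTime)
        *opTime = 42;  // only filled in when the caller asked for it
    return chunks;
}

int main() {
    // Caller that wants the opTime as well as the chunks.
    OpTime opTime = 0;
    auto withTime = fetchChunks(&opTime);
    if (!withTime) return 1;

    // Caller that only wants the chunks passes nullptr, as several call sites above do.
    auto withoutTime = fetchChunks(nullptr);
    return withoutTime && withTime->size() == withoutTime->size() ? 0 : 1;
}
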