summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDianna Hohensee <dianna.hohensee@10gen.com>2017-03-08 09:39:22 -0500
committerDianna Hohensee <dianna.hohensee@10gen.com>2017-03-13 16:36:45 -0400
commit9af191dc136f39e5cb96b3c805a1e392ad3637f9 (patch)
treee69c0f571b239477087660b35b9be8926a08a01d
parent965dc76f4b4e27f7a9e3bc7810b608c53085d32f (diff)
downloadmongo-9af191dc136f39e5cb96b3c805a1e392ad3637f9.tar.gz
SERVER-28176 read chunks collections on shards
-rw-r--r--src/mongo/db/s/metadata_loader.cpp25
-rw-r--r--src/mongo/db/s/shard_metadata_util.cpp268
-rw-r--r--src/mongo/db/s/shard_metadata_util.h62
-rw-r--r--src/mongo/db/s/shard_metadata_util_test.cpp41
-rw-r--r--src/mongo/s/catalog/type_chunk.cpp1
-rw-r--r--src/mongo/s/catalog/type_chunk.h3
6 files changed, 310 insertions, 90 deletions
diff --git a/src/mongo/db/s/metadata_loader.cpp b/src/mongo/db/s/metadata_loader.cpp
index bdb68ae0810..ee3714b71ac 100644
--- a/src/mongo/db/s/metadata_loader.cpp
+++ b/src/mongo/db/s/metadata_loader.cpp
@@ -188,27 +188,12 @@ Status MetadataLoader::_initChunks(OperationContext* opCtx,
ns, &metadata->_chunksMap, &metadata->_collVersion, &versionMap, shard);
try {
- const auto diffQuery = SCMConfigDiffTracker::createConfigDiffQuery(NamespaceString(ns),
- metadata->_collVersion);
- std::vector<ChunkType> chunks;
- Status status = catalogClient->getChunks(opCtx,
- diffQuery.query,
- diffQuery.sort,
- boost::none,
- &chunks,
- nullptr,
- repl::ReadConcernLevel::kMajorityReadConcern);
- if (!status.isOK()) {
- return status;
- }
-
- if (isShardPrimary) {
- status = shardmetadatautil::writeNewChunks(
- opCtx, NamespaceString(ns), chunks, metadata->_collVersion.epoch());
- if (!status.isOK()) {
- return status;
- }
+ auto statusWithChunks = shardmetadatautil::getChunks(
+ opCtx, catalogClient, NamespaceString(ns), metadata->_collVersion, isShardPrimary);
+ if (!statusWithChunks.isOK()) {
+ return statusWithChunks.getStatus();
}
+ auto chunks = statusWithChunks.getValue();
//
// The diff tracker should always find at least one chunk (the highest chunk we saw
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index eebd0a59109..da65af9b018 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/write_concern_options.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/unique_message.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_shard_collection.h"
@@ -51,6 +52,72 @@ const WriteConcernOptions kLocalWriteConcern(1,
WriteConcernOptions::SyncMode::UNSET,
Milliseconds(0));
+/**
+ * Structure representing the generated query and sort order for a chunk diffing operation.
+ */
+struct QueryAndSort {
+ const BSONObj query;
+ const BSONObj sort;
+};
+
+/**
+ * Returns a sort by ascending lastmod value.
+ */
+BSONObj createChunkDiffQuerySort() {
+ // NOTE: IT IS IMPORTANT FOR CONSISTENCY THAT WE SORT BY ASC VERSION, IN ORDER TO HANDLE CURSOR
+ // YIELDING BETWEEN CHUNKS OPERATIONS (SPLIT, MERGE, MOVE).
+ //
+ * This ensures that changes to chunk version (which will always be greater than any preceding
+ // update) will always come *after* our current position in the chunk cursor, using an index on
+ // lastmod.
+ return BSON(ChunkType::DEPRECATED_lastmod() << 1);
+}
+
+/**
+ * Returns the query needed to find incremental changes to the config.chunks collection on the
+ * config server.
+ *
+ * {"ns": nss, "lastmod": {"$gte": collectionVersion}}
+ */
+QueryAndSort createConfigChunkDiffQuery(const NamespaceString& nss,
+ const ChunkVersion& collectionVersion) {
+ // The query has to find all the chunks $gte the server's current collection version. Splits,
+ // merges and moves will increment the collection version, so all updates are seen. The equal
+ // part of $gte is necessary because no chunk results indicates the collection has been dropped.
+ BSONObjBuilder queryBuilder;
+ queryBuilder.append(ChunkType::ns(), nss.ns());
+ {
+ BSONObjBuilder lastmodBuilder(queryBuilder.subobjStart(ChunkType::DEPRECATED_lastmod()));
+ lastmodBuilder.appendTimestamp("$gte", collectionVersion.toLong());
+ lastmodBuilder.done();
+ }
+
+ return QueryAndSort{queryBuilder.obj(), createChunkDiffQuerySort()};
+}
+
+/**
+ * Returns the query needed to find incremental changes to a config.chunks.ns collection on a shard
+ * server.
+ *
+ * {"lastmod": {"$gte": collectionVersion, "$lte": lastConsistentCollectionVersion}}
+ */
+QueryAndSort createShardChunkDiffQuery(const ChunkVersion& collectionVersion,
+ const ChunkVersion& lastConsistentCollectionVersion) {
+ // The query has to find all the chunks $gte the server's current collection version, and $lte
+ // to the last consistent collection version the shard primary set. Splits, merges and moves
+ // will increment the collection version, so all updates are seen. The equal part of $gte is
+ // necessary because no chunk results indicates the collection has been dropped.
+ BSONObjBuilder queryBuilder;
+ {
+ BSONObjBuilder lastmodBuilder(queryBuilder.subobjStart(ChunkType::DEPRECATED_lastmod()));
+ lastmodBuilder.appendTimestamp("$gte", collectionVersion.toLong());
+ lastmodBuilder.appendTimestamp("$lte", lastConsistentCollectionVersion.toLong());
+ lastmodBuilder.done();
+ }
+
+ return QueryAndSort{queryBuilder.obj(), createChunkDiffQuerySort()};
+}
+
} // namespace
StatusWith<std::pair<BSONObj, OID>> getCollectionShardKeyAndEpoch(
@@ -63,8 +130,7 @@ StatusWith<std::pair<BSONObj, OID>> getCollectionShardKeyAndEpoch(
auto statusWithColl = catalogClient->getCollection(opCtx, nss.ns());
if (!statusWithColl.isOK()) {
if (statusWithColl.getStatus() == ErrorCodes::NamespaceNotFound) {
- auto status = dropChunksAndDeleteCollectionsEntry(
- opCtx, NamespaceString(ChunkType::ConfigNS + "." + nss.ns()), nss);
+ auto status = dropChunksAndDeleteCollectionsEntry(opCtx, nss);
if (!status.isOK()) {
return status;
}
@@ -75,16 +141,19 @@ StatusWith<std::pair<BSONObj, OID>> getCollectionShardKeyAndEpoch(
auto collInfo = statusWithColl.getValue().value;
// Update the shard's config.collections entry so that secondaries receive any changes.
- Status updateStatus = updateCollectionEntry(opCtx,
- nss,
- BSON(ShardCollectionType::uuid(nss.ns())),
- ShardCollectionType(collInfo).toBSON());
+ Status updateStatus = updateShardCollectionEntry(opCtx,
+ nss,
+ BSON(ShardCollectionType::uuid(nss.ns())),
+ ShardCollectionType(collInfo).toBSON());
if (!updateStatus.isOK()) {
return updateStatus;
}
return std::pair<BSONObj, OID>(collInfo.getKeyPattern().toBSON(), collInfo.getEpoch());
} else { // shard secondary
+ // TODO: a secondary must wait and retry if the entry is not found or does not yet have a
+ // lastConsistentCollectionVersion.
+
auto statusWithCollectionEntry = readShardCollectionEntry(opCtx, nss);
if (!statusWithCollectionEntry.isOK()) {
return statusWithCollectionEntry.getStatus();
@@ -103,7 +172,6 @@ StatusWith<std::pair<BSONObj, OID>> getCollectionShardKeyAndEpoch(
}
}
-
StatusWith<ShardCollectionType> readShardCollectionEntry(OperationContext* opCtx,
const NamespaceString& nss) {
Query fullQuery(BSON(ShardCollectionType::uuid() << nss.ns()));
@@ -131,9 +199,6 @@ StatusWith<ShardCollectionType> readShardCollectionEntry(OperationContext* opCtx
return statusWithCollectionEntry.getStatus();
}
- // There should only be one entry per namespace!
- invariant(!cursor->more());
-
return statusWithCollectionEntry.getValue();
} catch (const DBException& ex) {
return {ex.toStatus().code(),
@@ -143,10 +208,10 @@ StatusWith<ShardCollectionType> readShardCollectionEntry(OperationContext* opCtx
}
}
-Status updateCollectionEntry(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& query,
- const BSONObj& update) {
+Status updateShardCollectionEntry(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const BSONObj& query,
+ const BSONObj& update) {
const BSONElement idField = query.getField("_id");
invariant(!idField.eoo());
@@ -188,11 +253,128 @@ Status updateCollectionEntry(OperationContext* opCtx,
}
}
+StatusWith<std::vector<ChunkType>> getChunks(OperationContext* opCtx,
+ ShardingCatalogClient* catalogClient,
+ const NamespaceString& nss,
+ const ChunkVersion& collectionVersion,
+ bool isShardPrimary) {
+ if (isShardPrimary) {
+ // Get the chunks from the config server.
+ std::vector<ChunkType> chunks;
+ QueryAndSort diffQuery = createConfigChunkDiffQuery(nss, collectionVersion);
+ Status status = catalogClient->getChunks(opCtx,
+ diffQuery.query,
+ diffQuery.sort,
+ boost::none,
+ &chunks,
+ nullptr,
+ repl::ReadConcernLevel::kMajorityReadConcern);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ if (chunks.empty()) {
+ // This means that the collection was dropped because the query does $gte a version it
+ // already has: the query should always find that version or a greater one, never
+ // nothing.
+ status = dropChunksAndDeleteCollectionsEntry(opCtx, nss);
+ if (!status.isOK()) {
+ return status;
+ }
+ return {ErrorCodes::NamespaceNotFound,
+ str::stream() << "Could not load metadata because collection " << nss.ns()
+ << " was dropped"};
+ }
+
+ // Persist copies locally on the shard.
+ status = shardmetadatautil::writeNewChunks(opCtx, nss, chunks, collectionVersion.epoch());
+ if (!status.isOK()) {
+ return status;
+ }
+
+ return chunks;
+ } else { // shard secondary
+ // Get the chunks from this shard.
+ auto statusWithChunks = readShardChunks(opCtx, nss, collectionVersion);
+ if (!statusWithChunks.isOK()) {
+ return statusWithChunks.getStatus();
+ }
+
+ if (statusWithChunks.getValue().empty()) {
+ // If no chunks were found, then the collection has been dropped since the refresh
+ // began.
+ return {ErrorCodes::NamespaceNotFound,
+ str::stream() << "Could not load metadata because collection " << nss.ns()
+ << " was dropped"};
+ }
+
+ return statusWithChunks.getValue();
+ }
+}
+
+StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const ChunkVersion& collectionVersion) {
+ // Get the lastConsistentCollectionVersion from the config.collections entry for 'nss'.
+ auto statusWithShardCollectionType = readShardCollectionEntry(opCtx, nss);
+ if (!statusWithShardCollectionType.isOK()) {
+ return statusWithShardCollectionType.getStatus();
+ }
+ ShardCollectionType shardCollectionType = statusWithShardCollectionType.getValue();
+
+ if (!shardCollectionType.isLastConsistentCollectionVersionSet()) {
+ // The collection has been dropped and recreated since the refresh began.
+ return {ErrorCodes::NamespaceNotFound,
+ str::stream() << "Could not load metadata because collection " << nss.ns()
+ << " was dropped"};
+ }
+
+ // Query to retrieve the chunks.
+ QueryAndSort diffQuery = createShardChunkDiffQuery(
+ collectionVersion, shardCollectionType.getLastConsistentCollectionVersion());
+ Query fullQuery(diffQuery.query);
+ fullQuery.sort(diffQuery.sort);
+ fullQuery.readPref(ReadPreference::SecondaryOnly, BSONArray());
+
+ try {
+ DBDirectClient client(opCtx);
+
+ std::string chunkMetadataNs = ChunkType::ShardNSPrefix + nss.ns();
+ std::unique_ptr<DBClientCursor> cursor = client.query(chunkMetadataNs, fullQuery, 0LL);
+
+ if (!cursor) {
+ return {ErrorCodes::OperationFailed,
+ str::stream() << "Failed to establish a cursor for reading " << chunkMetadataNs
+ << " from local storage"};
+ }
+
+ std::vector<ChunkType> chunks;
+ while (cursor->more()) {
+ BSONObj document = cursor->nextSafe().getOwned();
+ auto statusWithChunk = ChunkType::fromShardBSON(
+ document, shardCollectionType.getLastConsistentCollectionVersion().epoch());
+ if (!statusWithChunk.isOK()) {
+ return {statusWithChunk.getStatus().code(),
+ str::stream() << "Failed to parse chunk '" << document.toString()
+ << "' due to "
+ << statusWithChunk.getStatus().reason()};
+ }
+ chunks.push_back(std::move(statusWithChunk.getValue()));
+ }
+
+ return chunks;
+ } catch (const DBException& ex) {
+ return ex.toStatus();
+ }
+}
+
Status writeNewChunks(OperationContext* opCtx,
const NamespaceString& nss,
const std::vector<ChunkType>& chunks,
const OID& currEpoch) {
- NamespaceString chunkMetadataNss(ChunkType::ConfigNS + "." + nss.ns());
+ invariant(!chunks.empty());
+
+ NamespaceString chunkMetadataNss(ChunkType::ShardNSPrefix + nss.ns());
try {
DBDirectClient client(opCtx);
@@ -224,7 +406,7 @@ Status writeNewChunks(OperationContext* opCtx,
if (!chunk.getVersion().hasEqualEpoch(currEpoch)) {
// This means the collection was dropped and recreated. Drop the chunk metadata
// and return.
- Status status = dropChunksAndDeleteCollectionsEntry(opCtx, chunkMetadataNss, nss);
+ Status status = dropChunksAndDeleteCollectionsEntry(opCtx, nss);
if (!status.isOK()) {
return status;
}
@@ -288,18 +470,16 @@ Status writeNewChunks(OperationContext* opCtx,
}
// Must update the config.collections 'lastConsistentCollectionVersion' field so that
- // secondaries can load the latest config.chunks.ns writes.
- if (!chunks.empty()) {
- BSONObjBuilder builder;
- chunks.back().getVersion().appendWithFieldForCommands(
- &builder, ShardCollectionType::lastConsistentCollectionVersion());
- BSONObj update = builder.obj();
-
- auto collUpdateStatus = updateCollectionEntry(
- opCtx, nss, BSON(ShardCollectionType::uuid(nss.ns())), update);
- if (!collUpdateStatus.isOK()) {
- return collUpdateStatus;
- }
+ // secondaries can load the latest chunk writes.
+ BSONObjBuilder builder;
+ chunks.back().getVersion().appendWithFieldForCommands(
+ &builder, ShardCollectionType::lastConsistentCollectionVersion());
+ BSONObj update = builder.obj();
+
+ auto collUpdateStatus = updateShardCollectionEntry(
+ opCtx, nss, BSON(ShardCollectionType::uuid(nss.ns())), update);
+ if (!collUpdateStatus.isOK()) {
+ return collUpdateStatus;
}
return Status::OK();
@@ -308,27 +488,15 @@ Status writeNewChunks(OperationContext* opCtx,
}
}
-Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx,
- const NamespaceString& chunkMetadataNss,
- const NamespaceString& collectionsEntryNss) {
+Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx, const NamespaceString& nss) {
+ NamespaceString chunkMetadataNss(ChunkType::ShardNSPrefix + nss.ns());
+
try {
DBDirectClient client(opCtx);
- // Drop the config.chunks.ns collection specified by 'chunkMetadataNss'.
- rpc::UniqueReply commandResponse =
- client.runCommandWithMetadata(chunkMetadataNss.db().toString(),
- "drop",
- rpc::makeEmptyMetadata(),
- BSON("drop" << chunkMetadataNss.coll()));
- Status status = getStatusFromCommandResult(commandResponse->getCommandReply());
-
- if (!status.isOK() && status.code() != ErrorCodes::NamespaceNotFound) {
- return status;
- }
-
- // Delete the config.collections entry matching 'collectionsEntryNss'.
+ // Delete the config.collections entry matching 'nss'.
auto deleteDocs(stdx::make_unique<BatchedDeleteDocument>());
- deleteDocs->setQuery(BSON(ShardCollectionType::uuid << collectionsEntryNss.ns()));
+ deleteDocs->setQuery(BSON(ShardCollectionType::uuid << nss.ns()));
deleteDocs->setLimit(0);
auto deleteRequest(stdx::make_unique<BatchedDeleteRequest>());
@@ -346,6 +514,16 @@ Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx,
return deleteStatus;
}
+ // Drop the config.chunks.ns collection specified by 'chunkMetadataNss'.
+ BSONObj result;
+ bool isOK = client.dropCollection(chunkMetadataNss.ns(), kLocalWriteConcern, &result);
+ if (!isOK) {
+ Status status = getStatusFromCommandResult(result);
+ if (!status.isOK() && status.code() != ErrorCodes::NamespaceNotFound) {
+ return status;
+ }
+ }
+
return Status::OK();
} catch (const DBException& ex) {
return ex.toStatus();
diff --git a/src/mongo/db/s/shard_metadata_util.h b/src/mongo/db/s/shard_metadata_util.h
index 2acbd138a1c..4bfae1bd1e9 100644
--- a/src/mongo/db/s/shard_metadata_util.h
+++ b/src/mongo/db/s/shard_metadata_util.h
@@ -36,12 +36,13 @@
namespace mongo {
class BSONObj;
+struct ChunkVersion;
class ChunkType;
class CollectionMetadata;
-class ShardCollectionType;
class NamespaceString;
class OID;
class OperationContext;
+class ShardCollectionType;
class ShardingCatalogClient;
template <typename T>
class StatusWith;
@@ -56,6 +57,8 @@ namespace shardmetadatautil {
* Gets the config.collections for 'nss' entry either remotely from the config server if
* 'isShardPrimary' is true or locally from the shard if false. Additionally updates the shard's
* config.collections entry with the remotely retrieved metadata if 'isShardPrimary' is true.
+ *
+ * Returns NamespaceNotFound if the collection was dropped.
*/
StatusWith<std::pair<BSONObj, OID>> getCollectionShardKeyAndEpoch(
OperationContext* opCtx,
@@ -74,23 +77,46 @@ StatusWith<ShardCollectionType> readShardCollectionEntry(OperationContext* opCtx
* Only the fields specified in 'update' are modified.
* Sets upsert to true on the update operation in case the entry does not exist locally yet.
*/
-Status updateCollectionEntry(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& query,
- const BSONObj& update);
+Status updateShardCollectionEntry(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const BSONObj& query,
+ const BSONObj& update);
+
+/**
+ * Gets the chunks for 'nss' that have lastmod versions equal to or higher than 'collectionVersion'.
+ * Retrieves the chunks from the config server's config.chunks collection if 'isShardPrimary' is
+ * true; otherwise reads locally from the shard's chunks collection corresponding to 'nss'.
+ * Additionally updates the shard's chunks collection with the remotely retrieved chunks if
+ * 'isShardPrimary' is true.
+ *
+ * Returns NamespaceNotFound if the collection was dropped.
+ */
+StatusWith<std::vector<ChunkType>> getChunks(OperationContext* opCtx,
+ ShardingCatalogClient* catalogClient,
+ const NamespaceString& nss,
+ const ChunkVersion& collectionVersion,
+ bool isShardPrimary);
+
+/**
+ * Reads the shard server's chunks collection corresponding to 'nss' for chunks with lastmod gte
+ * 'collectionVersion'.
+ */
+StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const ChunkVersion& collectionVersion);
/**
* Two threads running this function in parallel for the same collection can corrupt the collection
* data!
*
- * Takes a vector of 'chunks' and updates the config.chunks.ns collection specified by 'nss'.
- * Any chunk documents in config.chunks.ns that overlap with a chunk in 'chunks' is removed
- * as the new chunk document is inserted. If the epoch of any chunk in 'chunks' does not match
- * 'currEpoch', the chunk metadata is dropped and a RemoteChangeDetected error returned.
+ * Takes a vector of 'chunks' and updates the shard's chunks collection for 'nss'. Any chunk
+ * documents in config.chunks.ns that overlap with a chunk in 'chunks' is removed as the new chunk
+ * document is inserted. If the epoch of any chunk in 'chunks' does not match 'currEpoch', the chunk
+ * metadata is dropped and a RemoteChangeDetected error returned.
*
- * @nss - the regular collection namespace for which chunk metadata is being updated.
- * @chunks - chunks retrieved from the config server, sorted in ascending chunk version order
- * @currEpoch - what this shard server expects to be the collection epoch.
+ * nss - the regular collection namespace for which chunk metadata is being updated.
+ * chunks - chunks retrieved from the config server, sorted in ascending chunk version order
+ * currEpoch - what this shard server expects to be the collection epoch.
*
* Returns:
* - RemoteChangeDetected if the chunk version epoch of any chunk in 'chunks' is different than
@@ -103,12 +129,14 @@ Status writeNewChunks(OperationContext* opCtx,
const OID& currEpoch);
/**
- * Locally on this shard, drops the config.chunks.ns corresponding to 'chunkMetadataNss' and then
- * deletes the config.collections entry for 'collectionEntryNss'.
+ * Locally on this shard, deletes the config.collections entry for 'nss', then drops
+ * the corresponding chunks collection.
+ *
+ * The order is important because the secondary observes changes to the config.collections entries.
+ * If the chunks were dropped first, the secondary would keep refreshing until it exceeded its
+ * retries, rather than returning with a useful error message.
*/
-Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx,
- const NamespaceString& chunkMetadataNss,
- const NamespaceString& collectionsEntryNss);
+Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx, const NamespaceString& nss);
} // namespace shardmetadatautil
} // namespace mongo
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index 79765f9720b..afd3d41504d 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -202,7 +202,7 @@ protected:
}
private:
- ChunkVersion _maxCollVersion{1, 0, OID::gen()};
+ ChunkVersion _maxCollVersion{0, 0, OID::gen()};
const KeyPattern _keyPattern{BSON("a" << 1)};
};
@@ -212,10 +212,11 @@ TEST_F(ShardMetadataUtilTest, UpdateAndReadCollectionsDocument) {
BSON(ShardCollectionType::uuid(kNss.ns())
<< ShardCollectionType::ns(kNss.ns())
<< ShardCollectionType::keyPattern(getKeyPattern().toBSON()));
- ASSERT_OK(shardmetadatautil::updateCollectionEntry(operationContext(),
- kNss,
- BSON(ShardCollectionType::uuid(kNss.ns())),
- shardCollectionTypeObj));
+ ASSERT_OK(
+ shardmetadatautil::updateShardCollectionEntry(operationContext(),
+ kNss,
+ BSON(ShardCollectionType::uuid(kNss.ns())),
+ shardCollectionTypeObj));
ShardCollectionType readShardCollectionType =
assertGet(shardmetadatautil::readShardCollectionEntry(operationContext(), kNss));
@@ -225,7 +226,7 @@ TEST_F(ShardMetadataUtilTest, UpdateAndReadCollectionsDocument) {
BSONObjBuilder updateBuilder;
getCollectionVersion().appendWithFieldForCommands(
&updateBuilder, ShardCollectionType::lastConsistentCollectionVersion());
- ASSERT_OK(shardmetadatautil::updateCollectionEntry(
+ ASSERT_OK(shardmetadatautil::updateShardCollectionEntry(
operationContext(), kNss, BSON(ShardCollectionType::uuid(kNss.ns())), updateBuilder.obj()));
readShardCollectionType =
@@ -245,6 +246,31 @@ TEST_F(ShardMetadataUtilTest, WriteNewChunks) {
checkChunks(kChunkMetadataNss, chunks);
}
+TEST_F(ShardMetadataUtilTest, WriteAndReadChunks) {
+ setUpCollection();
+
+ std::vector<ChunkType> chunks = makeFourChunks();
+ shardmetadatautil::writeNewChunks(
+ operationContext(), kNss, chunks, getCollectionVersion().epoch());
+ checkChunks(kChunkMetadataNss, chunks);
+
+ // read all the chunks
+ std::vector<ChunkType> readChunks = assertGet(shardmetadatautil::readShardChunks(
+ operationContext(), kNss, ChunkVersion(0, 0, getCollectionVersion().epoch())));
+ for (auto chunkIt = chunks.begin(), readChunkIt = readChunks.begin();
+ chunkIt != chunks.end() && readChunkIt != readChunks.end();
+ ++chunkIt, ++readChunkIt) {
+ ASSERT_BSONOBJ_EQ(chunkIt->toShardBSON(), readChunkIt->toShardBSON());
+ }
+
+ // read only the highest version chunk
+ readChunks = assertGet(
+ shardmetadatautil::readShardChunks(operationContext(), kNss, getCollectionVersion()));
+
+ ASSERT_TRUE(readChunks.size() == 1);
+ ASSERT_BSONOBJ_EQ(chunks.back().toShardBSON(), readChunks.front().toShardBSON());
+}
+
TEST_F(ShardMetadataUtilTest, UpdateWithWriteNewChunks) {
// Load some chunk metadata.
@@ -304,8 +330,7 @@ TEST_F(ShardMetadataUtilTest, UpdateWithWriteNewChunks) {
TEST_F(ShardMetadataUtilTest, DropChunksAndDeleteCollectionsEntry) {
setUpShardingMetadata();
- ASSERT_OK(shardmetadatautil::dropChunksAndDeleteCollectionsEntry(
- operationContext(), kChunkMetadataNss, kNss));
+ ASSERT_OK(shardmetadatautil::dropChunksAndDeleteCollectionsEntry(operationContext(), kNss));
checkCollectionIsEmpty(NamespaceString(ShardCollectionType::ConfigNS));
checkCollectionIsEmpty(kChunkMetadataNss);
}
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index e5091f16954..a114f86a681 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -43,6 +43,7 @@
namespace mongo {
const std::string ChunkType::ConfigNS = "config.chunks";
+const std::string ChunkType::ShardNSPrefix = "config.chunks.";
const BSONField<std::string> ChunkType::name("_id");
const BSONField<BSONObj> ChunkType::minShardID("_id");
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index 6484f97b03b..9827b5a3a79 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -133,6 +133,9 @@ public:
// Name of the chunks collection in the config server.
static const std::string ConfigNS;
+ // The shard chunks collections' common namespace prefix.
+ static const std::string ShardNSPrefix;
+
// Field names and types in the chunks collections.
static const BSONField<std::string> name;
static const BSONField<BSONObj> minShardID;