summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorJack Mulrow <jack.mulrow@mongodb.com>2019-10-15 21:10:56 +0000
committerevergreen <evergreen@mongodb.com>2019-10-15 21:10:56 +0000
commitc2af35bc34c6ee187f05246cd8eefcac42fc01c9 (patch)
tree8dcad8384d07644cc39ea406a74e51fcd42fbda3 /src
parentf4c495f0848ba8bf6200b966c3bb4235fee7a7d9 (diff)
downloadmongo-c2af35bc34c6ee187f05246cd8eefcac42fc01c9.tar.gz
SERVER-42299 Upgrade/downgrade for config.chunks and config.tags
Diffstat (limited to 'src')
-rw-r--r--src/mongo/db/commands/set_feature_compatibility_version_command.cpp31
-rw-r--r--src/mongo/db/s/active_shard_collection_registry.cpp17
-rw-r--r--src/mongo/db/s/active_shard_collection_registry.h8
-rw-r--r--src/mongo/db/s/config/initial_split_policy.cpp1
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager.cpp245
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager.h22
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp126
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp6
-rw-r--r--src/mongo/db/s/shardsvr_shard_collection.cpp15
-rw-r--r--src/mongo/s/catalog/type_chunk.cpp72
-rw-r--r--src/mongo/s/catalog/type_chunk.h34
-rw-r--r--src/mongo/s/catalog/type_chunk_test.cpp80
-rw-r--r--src/mongo/s/catalog/type_tags.cpp7
-rw-r--r--src/mongo/s/catalog/type_tags.h8
-rw-r--r--src/mongo/s/catalog/type_tags_test.cpp14
15 files changed, 638 insertions, 48 deletions
diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
index 6232dc35601..b49042bc85a 100644
--- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
+++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/repl/repl_client_info.h"
+#include "mongo/db/s/active_shard_collection_registry.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/server_options.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -60,6 +61,8 @@ namespace {
MONGO_FAIL_POINT_DEFINE(featureCompatibilityDowngrade);
MONGO_FAIL_POINT_DEFINE(featureCompatibilityUpgrade);
+MONGO_FAIL_POINT_DEFINE(pauseBeforeDowngradingConfigMetadata); // TODO SERVER-44034: Remove.
+MONGO_FAIL_POINT_DEFINE(pauseBeforeUpgradingConfigMetadata); // TODO SERVER-44034: Remove.
/**
* Sets the minimum allowed version for the cluster. If it is 4.2, then the node should not use 4.4
@@ -173,6 +176,14 @@ public:
Lock::GlobalLock lk(opCtx, MODE_S);
}
+ if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+ // The primary shard sharding a collection will write the initial chunks for a
+ // collection directly to the config server, so wait for all shard collections to
+ // complete to guarantee no chunks are missed by the update on the config server.
+ ActiveShardCollectionRegistry::get(opCtx).waitForActiveShardCollectionsToComplete(
+ opCtx);
+ }
+
// Upgrade shards before config finishes its upgrade.
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
uassertStatusOK(
@@ -183,6 +194,12 @@ public:
cmdObj,
BSON(FeatureCompatibilityVersionCommandParser::kCommandName
<< requestedVersion)))));
+
+ if (MONGO_unlikely(pauseBeforeUpgradingConfigMetadata.shouldFail())) {
+ log() << "Hit pauseBeforeUpgradingConfigMetadata";
+ pauseBeforeUpgradingConfigMetadata.pauseWhileSet(opCtx);
+ }
+ ShardingCatalogManager::get(opCtx)->upgradeChunksAndTags(opCtx);
}
FeatureCompatibilityVersion::unsetTargetUpgradeOrDowngrade(opCtx, requestedVersion);
@@ -215,6 +232,14 @@ public:
Lock::GlobalLock lk(opCtx, MODE_S);
}
+ if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+ // The primary shard sharding a collection will write the initial chunks for a
+ // collection directly to the config server, so wait for all shard collections to
+ // complete to guarantee no chunks are missed by the update on the config server.
+ ActiveShardCollectionRegistry::get(opCtx).waitForActiveShardCollectionsToComplete(
+ opCtx);
+ }
+
// Downgrade shards before config finishes its downgrade.
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
uassertStatusOK(
@@ -225,6 +250,12 @@ public:
cmdObj,
BSON(FeatureCompatibilityVersionCommandParser::kCommandName
<< requestedVersion)))));
+
+ if (MONGO_unlikely(pauseBeforeDowngradingConfigMetadata.shouldFail())) {
+ log() << "Hit pauseBeforeDowngradingConfigMetadata";
+ pauseBeforeDowngradingConfigMetadata.pauseWhileSet(opCtx);
+ }
+ ShardingCatalogManager::get(opCtx)->downgradeChunksAndTags(opCtx);
}
FeatureCompatibilityVersion::unsetTargetUpgradeOrDowngrade(opCtx, requestedVersion);
diff --git a/src/mongo/db/s/active_shard_collection_registry.cpp b/src/mongo/db/s/active_shard_collection_registry.cpp
index d2bda7ece20..9b667370808 100644
--- a/src/mongo/db/s/active_shard_collection_registry.cpp
+++ b/src/mongo/db/s/active_shard_collection_registry.cpp
@@ -139,6 +139,23 @@ Status ActiveShardCollectionRegistry::ActiveShardCollectionState::constructError
<< "collection with arguments: " << activeRequest.toBSON()};
}
+void ActiveShardCollectionRegistry::waitForActiveShardCollectionsToComplete(
+ OperationContext* opCtx) {
+ // Take a snapshot of the currently active shard collections.
+ std::vector<SharedSemiFuture<boost::optional<UUID>>> shardCollectionFutures;
+ {
+ stdx::lock_guard<Latch> lk(_mutex);
+ for (const auto& it : _activeShardCollectionMap) {
+ shardCollectionFutures.emplace_back(it.second->_uuidPromise.getFuture());
+ }
+ }
+
+ // Synchronously wait for all futures to resolve.
+ for (const auto& fut : shardCollectionFutures) {
+ fut.wait(opCtx);
+ }
+}
+
ScopedShardCollection::ScopedShardCollection(std::string nss,
ActiveShardCollectionRegistry* registry,
bool shouldExecute,
diff --git a/src/mongo/db/s/active_shard_collection_registry.h b/src/mongo/db/s/active_shard_collection_registry.h
index 91423d65d7c..ba932d455e2 100644
--- a/src/mongo/db/s/active_shard_collection_registry.h
+++ b/src/mongo/db/s/active_shard_collection_registry.h
@@ -74,6 +74,14 @@ public:
StatusWith<ScopedShardCollection> registerShardCollection(
const ShardsvrShardCollection& request);
+ /**
+ * Takes a snapshot of all currently active shard collections and synchronously waits for each
+ * to complete.
+ *
+ * TODO SERVER-44034: Remove this method.
+ */
+ void waitForActiveShardCollectionsToComplete(OperationContext* opCtx);
+
private:
friend class ScopedShardCollection;
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 1b5f6fcf965..fe10ee8c6bf 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -58,7 +58,6 @@ void appendChunk(const NamespaceString& nss,
std::vector<ChunkType>* chunks) {
chunks->emplace_back(nss, ChunkRange(min, max), *version, shardId);
auto& chunk = chunks->back();
- chunk.setName(OID::gen()); // TODO SERVER-42299: Remove this line.
chunk.setHistory({ChunkHistory(validAfter, shardId)});
version->incMinor();
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 557529099ff..da229c14b5b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -33,8 +33,12 @@
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/auth/authorization_session_impl.h"
+#include "mongo/db/commands/txn_cmds_gen.h"
+#include "mongo/db/logical_session_cache.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/balancer/type_migration.h"
+#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/config_server_version.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -46,6 +50,9 @@
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
+#include "mongo/s/write_ops/batched_command_request.h"
+#include "mongo/s/write_ops/batched_command_response.h"
+#include "mongo/transport/service_entry_point.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -57,6 +64,75 @@ const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::
const auto getShardingCatalogManager =
ServiceContext::declareDecoration<boost::optional<ShardingCatalogManager>>();
+OpMsg runCommandInLocalTxn(OperationContext* opCtx,
+ StringData db,
+ bool startTransaction,
+ TxnNumber txnNumber,
+ BSONObj cmdObj) {
+ BSONObjBuilder bob(std::move(cmdObj));
+ if (startTransaction) {
+ bob.append("startTransaction", true);
+ }
+ bob.append("autocommit", false);
+ bob.append(OperationSessionInfo::kTxnNumberFieldName, txnNumber);
+
+ BSONObjBuilder lsidBuilder(bob.subobjStart("lsid"));
+ opCtx->getLogicalSessionId()->serialize(&lsidBuilder);
+ lsidBuilder.doneFast();
+
+ return OpMsg::parseOwned(
+ opCtx->getServiceContext()
+ ->getServiceEntryPoint()
+ ->handleRequest(opCtx,
+ OpMsgRequest::fromDBAndBody(db.toString(), bob.obj()).serialize())
+ .response);
+}
+
+void insertDocumentsInLocalTxn(OperationContext* opCtx,
+ const NamespaceString& nss,
+ std::vector<BSONObj> docs,
+ bool startTransaction,
+ TxnNumber txnNumber) {
+ BatchedCommandRequest request([&] {
+ write_ops::Insert insertOp(nss);
+ insertOp.setDocuments(std::move(docs));
+ return insertOp;
+ }());
+
+ uassertStatusOK(getStatusFromWriteCommandReply(
+ runCommandInLocalTxn(opCtx, nss.db(), startTransaction, txnNumber, request.toBSON()).body));
+}
+
+void removeDocumentsInLocalTxn(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const BSONObj& query,
+ bool startTransaction,
+ TxnNumber txnNumber) {
+ BatchedCommandRequest request([&] {
+ write_ops::Delete deleteOp(nss);
+ deleteOp.setDeletes({[&] {
+ write_ops::DeleteOpEntry entry;
+ entry.setQ(query);
+ entry.setMulti(true);
+ return entry;
+ }()});
+ return deleteOp;
+ }());
+
+ uassertStatusOK(getStatusFromWriteCommandReply(
+ runCommandInLocalTxn(opCtx, nss.db(), startTransaction, txnNumber, request.toBSON()).body));
+}
+
+void commitLocalTxn(OperationContext* opCtx, TxnNumber txnNumber) {
+ uassertStatusOK(
+ getStatusFromCommandResult(runCommandInLocalTxn(opCtx,
+ NamespaceString::kAdminDb,
+ false /* startTransaction */,
+ txnNumber,
+ BSON(CommitTransaction::kCommandName << 1))
+ .body));
+}
+
} // namespace
void ShardingCatalogManager::create(ServiceContext* serviceContext,
@@ -329,4 +405,173 @@ Lock::ExclusiveLock ShardingCatalogManager::lockZoneMutex(OperationContext* opCt
return lk;
}
+void ShardingCatalogManager::upgradeChunksAndTags(OperationContext* opCtx) {
+ // Upgrade each chunk document by deleting and re-inserting with the 4.4 _id format.
+ {
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
+
+ auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto findResponse = uassertStatusOK(
+ configShard->exhaustiveFindOnConfig(opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ {},
+ {},
+ boost::none /* limit */));
+
+ AlternativeSessionRegion asr(opCtx);
+ AuthorizationSession::get(asr.opCtx()->getClient())
+ ->grantInternalAuthorization(asr.opCtx()->getClient());
+ TxnNumber txnNumber = 0;
+ for (const auto& chunkObj : findResponse.docs) {
+ auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(chunkObj));
+
+ removeDocumentsInLocalTxn(
+ asr.opCtx(),
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns(chunk.getNS().ns()) << ChunkType::min(chunk.getMin())),
+ true /* startTransaction */,
+ txnNumber);
+
+ // Note that ChunkType::toConfigBSON() will not include an _id if one hasn't been set,
+ // which will be the case for chunks written in the 4.2 format because parsing ignores
+ // _ids in the 4.2 format, so the insert path will generate one for us.
+ insertDocumentsInLocalTxn(asr.opCtx(),
+ ChunkType::ConfigNS,
+ {chunk.toConfigBSON()},
+ false /* startTransaction */,
+ txnNumber);
+
+ commitLocalTxn(asr.opCtx(), txnNumber);
+
+ txnNumber += 1;
+ }
+ }
+
+ // Upgrade each tag document by deleting and re-inserting with the 4.4 _id format.
+ {
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
+
+ auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto findResponse = uassertStatusOK(
+ configShard->exhaustiveFindOnConfig(opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ TagsType::ConfigNS,
+ {},
+ {},
+ boost::none /* limit */));
+
+ AlternativeSessionRegion asr(opCtx);
+ AuthorizationSession::get(asr.opCtx()->getClient())
+ ->grantInternalAuthorization(asr.opCtx()->getClient());
+ TxnNumber txnNumber = 0;
+ for (const auto& tagObj : findResponse.docs) {
+ auto tag = uassertStatusOK(TagsType::fromBSON(tagObj));
+
+ removeDocumentsInLocalTxn(
+ asr.opCtx(),
+ TagsType::ConfigNS,
+ BSON(TagsType::ns(tag.getNS().ns()) << TagsType::min(tag.getMinKey())),
+ true /* startTransaction */,
+ txnNumber);
+
+ // Note that TagsType::toBSON() will not include an _id, so the insert path will
+ // generate one for us.
+ insertDocumentsInLocalTxn(asr.opCtx(),
+ TagsType::ConfigNS,
+ {tag.toBSON()},
+ false /* startTransaction */,
+ txnNumber);
+
+ commitLocalTxn(asr.opCtx(), txnNumber);
+
+ txnNumber += 1;
+ }
+ }
+}
+
+void ShardingCatalogManager::downgradeChunksAndTags(OperationContext* opCtx) {
+ // Downgrade each chunk document by deleting and re-inserting with the 4.2 _id format.
+ {
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
+
+ auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto findResponse = uassertStatusOK(
+ configShard->exhaustiveFindOnConfig(opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ {},
+ {},
+ boost::none /* limit */));
+
+ AlternativeSessionRegion asr(opCtx);
+ AuthorizationSession::get(asr.opCtx()->getClient())
+ ->grantInternalAuthorization(asr.opCtx()->getClient());
+ TxnNumber txnNumber = 0;
+ for (const auto& chunkObj : findResponse.docs) {
+ auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(chunkObj));
+
+ removeDocumentsInLocalTxn(
+ asr.opCtx(),
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns(chunk.getNS().ns()) << ChunkType::min(chunk.getMin())),
+ true /* startTransaction */,
+ txnNumber);
+
+ insertDocumentsInLocalTxn(asr.opCtx(),
+ ChunkType::ConfigNS,
+ {chunk.toConfigBSONLegacyID()},
+ false /* startTransaction */,
+ txnNumber);
+
+ commitLocalTxn(asr.opCtx(), txnNumber);
+
+ txnNumber += 1;
+ }
+ }
+
+ // Downgrade each tag document by deleting and re-inserting with the 4.2 _id format.
+ {
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
+
+ auto const configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto findResponse = uassertStatusOK(
+ configShard->exhaustiveFindOnConfig(opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ TagsType::ConfigNS,
+ {},
+ {},
+ boost::none /* limit */));
+
+ AlternativeSessionRegion asr(opCtx);
+ AuthorizationSession::get(asr.opCtx()->getClient())
+ ->grantInternalAuthorization(asr.opCtx()->getClient());
+ TxnNumber txnNumber = 0;
+ for (const auto& tagObj : findResponse.docs) {
+ auto tag = uassertStatusOK(TagsType::fromBSON(tagObj));
+
+ removeDocumentsInLocalTxn(
+ asr.opCtx(),
+ TagsType::ConfigNS,
+ BSON(TagsType::ns(tag.getNS().ns()) << TagsType::min(tag.getMinKey())),
+ true /* startTransaction */,
+ txnNumber);
+
+ insertDocumentsInLocalTxn(asr.opCtx(),
+ TagsType::ConfigNS,
+ {tag.toBSONLegacyID()},
+ false /* startTransaction */,
+ txnNumber);
+
+ commitLocalTxn(asr.opCtx(), txnNumber);
+
+ txnNumber += 1;
+ }
+ }
+}
+
} // namespace mongo
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index 586ad21c85f..6ebbdf264b3 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -416,6 +416,28 @@ public:
*/
static void clearForTests(ServiceContext* serviceContext);
+ /**
+ * Changes the _id format of all documents in config.chunks and config.tags to use the format
+ * introduced in 4.4.
+ *
+ * TODO SERVER-44034: Remove this method.
+ *
+ * TODO SERVER-42299: Optimize this method by batching inserts and deletes into larger
+ * transactions.
+ */
+ void upgradeChunksAndTags(OperationContext* opCtx);
+
+ /**
+ * Changes the _id format of all documents in config.chunks and config.tags to use the format
+ * expected by a 4.2 binary.
+ *
+ * TODO SERVER-44034: Remove this method.
+ *
+ * TODO SERVER-42299: Optimize this method by batching inserts and deletes into larger
+ * transactions.
+ */
+ void downgradeChunksAndTags(OperationContext* opCtx);
+
Lock::ExclusiveLock lockZoneMutex(OperationContext* opCtx);
private:
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 13040b176b9..e8e6fca56c9 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -43,6 +43,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/sharding_logging.h"
+#include "mongo/db/server_options.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -62,6 +63,13 @@ MONGO_FAIL_POINT_DEFINE(skipExpiringOldChunkHistory);
const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::UNSET, Seconds(0));
+bool isUpgradingOrDowngradingFCV() {
+ return (serverGlobalParams.featureCompatibility.getVersion() ==
+ ServerGlobalParams::FeatureCompatibility::Version::kUpgradingTo44) ||
+ (serverGlobalParams.featureCompatibility.getVersion() ==
+ ServerGlobalParams::FeatureCompatibility::Version::kDowngradingTo42);
+}
+
/**
* Append min, max and version information from chunk to the buffer for logChange purposes.
*/
@@ -78,6 +86,8 @@ void appendShortVersion(BufBuilder* out, const ChunkType& chunk) {
BSONArray buildMergeChunksTransactionUpdates(const std::vector<ChunkType>& chunksToMerge,
const ChunkVersion& mergeVersion,
const boost::optional<Timestamp>& validAfter) {
+ invariant(!isUpgradingOrDowngradingFCV());
+
BSONArrayBuilder updates;
// Build an update operation to expand the first chunk into the newly merged chunk
@@ -98,10 +108,18 @@ BSONArray buildMergeChunksTransactionUpdates(const std::vector<ChunkType>& chunk
mergedChunk.setHistory({ChunkHistory(validAfter.get(), mergedChunk.getShard())});
// add the new chunk information as the update object
- op.append("o", mergedChunk.toConfigBSON());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ op.append("o", mergedChunk.toConfigBSON());
- // query object
- op.append("o2", BSON(ChunkType::name(mergedChunk.getName())));
+ // query object
+ op.append("o2", BSON(ChunkType::name(mergedChunk.getName())));
+ } else {
+ op.append("o", mergedChunk.toConfigBSONLegacyID());
+
+ // query object
+ op.append("o2", BSON(ChunkType::legacyName(mergedChunk.getLegacyName())));
+ }
updates.append(op.obj());
}
@@ -113,7 +131,12 @@ BSONArray buildMergeChunksTransactionUpdates(const std::vector<ChunkType>& chunk
op.append("op", "d");
op.append("ns", ChunkType::ConfigNS.ns());
- op.append("o", BSON(ChunkType::name(chunksToMerge[i].getName())));
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ op.append("o", BSON(ChunkType::name(chunksToMerge[i].getName())));
+ } else {
+ op.append("o", BSON(ChunkType::legacyName(chunksToMerge[i].getLegacyName())));
+ }
updates.append(op.obj());
}
@@ -179,6 +202,7 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
const boost::optional<ChunkType>& controlChunk,
StringData fromShard,
StringData toShard) {
+ invariant(!isUpgradingOrDowngradingFCV());
// Update migratedChunk's version and shard.
BSONArrayBuilder updates;
@@ -189,7 +213,12 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
op.append("ns", ChunkType::ConfigNS.ns());
BSONObjBuilder n(op.subobjStart("o"));
- n.append(ChunkType::name(), migratedChunk.getName());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ n.append(ChunkType::name(), migratedChunk.getName());
+ } else {
+ n.append(ChunkType::legacyName(), ChunkType::genLegacyID(nss, migratedChunk.getMin()));
+ }
migratedChunk.getVersion().appendLegacyWithField(&n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), migratedChunk.getMin());
@@ -199,7 +228,12 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
n.done();
BSONObjBuilder q(op.subobjStart("o2"));
- q.append(ChunkType::name(), migratedChunk.getName());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ q.append(ChunkType::name(), migratedChunk.getName());
+ } else {
+ q.append(ChunkType::legacyName(), ChunkType::genLegacyID(nss, migratedChunk.getMin()));
+ }
q.done();
updates.append(op.obj());
@@ -213,7 +247,12 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
op.append("ns", ChunkType::ConfigNS.ns());
BSONObjBuilder n(op.subobjStart("o"));
- n.append(ChunkType::name(), controlChunk->getName());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ n.append(ChunkType::name(), controlChunk->getName());
+ } else {
+ n.append(ChunkType::legacyName(), ChunkType::genLegacyID(nss, controlChunk->getMin()));
+ }
controlChunk->getVersion().appendLegacyWithField(&n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), controlChunk->getMin());
@@ -224,7 +263,12 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
n.done();
BSONObjBuilder q(op.subobjStart("o2"));
- q.append(ChunkType::name(), controlChunk->getName());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ q.append(ChunkType::name(), controlChunk->getName());
+ } else {
+ q.append(ChunkType::legacyName(), controlChunk->getLegacyName());
+ }
q.done();
updates.append(op.obj());
@@ -298,6 +342,14 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// move chunks on different collections to proceed in parallel
Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
+ // The _id format for chunk documents changed in 4.4, so during an upgrade or downgrade it is
+ // not known which format the chunks are currently in. Splitting a chunk requires knowing the
+ // _id of the chunk being split, so to avoid confusing failures, splitting is disabled.
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "Chunks cannot be split while a feature compatibility version upgrade or downgrade is "
+ "in progress",
+ !isUpgradingOrDowngradingFCV());
+
// Get the max chunk version for this namespace.
auto swCollVersion = getMaxChunkVersionFromQueryResponse(
nss,
@@ -366,7 +418,8 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
newChunkBounds.push_back(range.getMax());
auto shouldTakeOriginalChunkID = true;
- std::string chunkID;
+ OID chunkID;
+ std::string legacyChunkID;
BSONArrayBuilder updates;
@@ -406,8 +459,13 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// First chunk takes ID of the original chunk and all other chunks get new IDs. This occurs
// because we perform an update operation below (with upsert true). Keeping the original ID
// ensures we overwrite the old chunk (before the split) without having to perform a delete.
- chunkID =
- shouldTakeOriginalChunkID ? origChunk.getValue().getName() : OID::gen().toString();
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ chunkID = shouldTakeOriginalChunkID ? origChunk.getValue().getName() : OID::gen();
+ } else {
+ legacyChunkID = shouldTakeOriginalChunkID ? origChunk.getValue().getLegacyName()
+ : ChunkType::genLegacyID(nss, startKey);
+ }
shouldTakeOriginalChunkID = false;
// build an update operation against the chunks collection of the config database
@@ -419,7 +477,12 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// add the modified (new) chunk information as the update object
BSONObjBuilder n(op.subobjStart("o"));
- n.append(ChunkType::name(), chunkID);
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ n.append(ChunkType::name(), chunkID);
+ } else {
+ n.append(ChunkType::legacyName(), legacyChunkID);
+ }
currentMaxVersion.appendLegacyWithField(&n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), startKey);
@@ -432,7 +495,12 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// add the chunk's _id as the query part of the update statement
BSONObjBuilder q(op.subobjStart("o2"));
- q.append(ChunkType::name(), chunkID);
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ q.append(ChunkType::name(), chunkID);
+ } else {
+ q.append(ChunkType::legacyName(), legacyChunkID);
+ }
q.done();
updates.append(op.obj());
@@ -527,6 +595,14 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
// move chunks on different collections to proceed in parallel
Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
+ // The _id format for chunk documents changed in 4.4, so during an upgrade or downgrade it is
+ // not known which format the chunks are currently in. Merging a chunk requires knowing the
+ // _id of the chunks being merged, so to avoid confusing failures, merging is disabled.
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "Chunks cannot be merged while a feature compatibility version upgrade or downgrade is "
+ "in progress",
+ !isUpgradingOrDowngradingFCV());
+
if (!validAfter) {
return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"};
}
@@ -580,7 +656,11 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
if (!itOrigChunk.isOK()) {
return itOrigChunk.getStatus();
}
- itChunk.setName(itOrigChunk.getValue().getName());
+
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ itChunk.setName(itOrigChunk.getValue().getName());
+ }
// Ensure the chunk boundaries are strictly increasing
if (chunkBoundaries[i].woCompare(itChunk.getMin()) <= 0) {
@@ -654,6 +734,14 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
// (Note: This is not needed while we have a global lock, taken here only for consistency.)
Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
+ // The _id format for chunk documents changed in 4.4, so during an upgrade or downgrade it is
+ // not known which format the chunks are currently in. Moving a chunk requires knowing the
+ // _id of the chunks being moved, so to avoid confusing failures, migrations are disabled.
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "Chunks cannot be migrated while a feature compatibility version upgrade or downgrade "
+ "is in progress",
+ !isUpgradingOrDowngradingFCV());
+
if (!validAfter) {
return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"};
}
@@ -724,7 +812,10 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
// Generate the new versions of migratedChunk and controlChunk. Migrating chunk's minor version
// will be 0.
ChunkType newMigratedChunk = migratedChunk;
- newMigratedChunk.setName(origChunk.getValue().getName());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ newMigratedChunk.setName(origChunk.getValue().getName());
+ }
newMigratedChunk.setShard(toShard);
newMigratedChunk.setVersion(ChunkVersion(
currentCollectionVersion.majorVersion() + 1, 0, currentCollectionVersion.epoch()));
@@ -768,7 +859,10 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
}
newControlChunk = origControlChunk.getValue();
- newControlChunk->setName(origControlChunk.getValue().getName());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) {
+ newControlChunk->setName(origControlChunk.getValue().getName());
+ }
newControlChunk->setVersion(ChunkVersion(
currentCollectionVersion.majorVersion() + 1, 1, currentCollectionVersion.epoch()));
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index 9b5b8eb0f8a..153319734e4 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -37,6 +37,7 @@
#include "mongo/client/read_preference.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/balancer/balancer_policy.h"
+#include "mongo/db/server_options.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_collection.h"
@@ -372,6 +373,11 @@ Status ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
BSONObj updateQuery(BSON(TagsType::ns(nss.ns()) << TagsType::min(fullShardKeyRange.getMin())));
BSONObjBuilder updateBuilder;
+ if (serverGlobalParams.featureCompatibility.getVersion() <=
+ ServerGlobalParams::FeatureCompatibility::Version::kDowngradingTo42) {
+ updateBuilder.append(
+ "_id", BSON(TagsType::ns(nss.ns()) << TagsType::min(fullShardKeyRange.getMin())));
+ }
updateBuilder.append(TagsType::ns(), nss.ns());
updateBuilder.append(TagsType::min(), fullShardKeyRange.getMin());
updateBuilder.append(TagsType::max(), fullShardKeyRange.getMax());
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index 211717731aa..448f3d1b451 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -62,6 +62,7 @@
#include "mongo/s/request_types/clone_collection_options_from_primary_shard_gen.h"
#include "mongo/s/request_types/shard_collection_gen.h"
#include "mongo/s/shard_util.h"
+#include "mongo/util/fail_point.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
#include "mongo/util/str.h"
@@ -70,6 +71,8 @@ namespace mongo {
namespace {
+MONGO_FAIL_POINT_DEFINE(pauseShardCollectionBeforeReturning);
+
struct ShardCollectionTargetState {
UUID uuid;
ShardKeyPattern shardKeyPattern;
@@ -607,7 +610,12 @@ void writeFirstChunksToConfig(OperationContext* opCtx,
std::vector<BSONObj> chunkObjs;
chunkObjs.reserve(initialChunks.chunks.size());
for (const auto& chunk : initialChunks.chunks) {
- chunkObjs.push_back(chunk.toConfigBSON());
+ if (serverGlobalParams.featureCompatibility.getVersion() >=
+ ServerGlobalParams::FeatureCompatibility::Version::kUpgradingTo44) {
+ chunkObjs.push_back(chunk.toConfigBSON());
+ } else {
+ chunkObjs.push_back(chunk.toConfigBSONLegacyID());
+ }
}
Grid::get(opCtx)->catalogClient()->insertConfigDocumentsAsRetryableWrite(
@@ -856,6 +864,11 @@ public:
str::stream() << "Collection " << nss << " is sharded without UUID",
uuid);
+ if (MONGO_unlikely(pauseShardCollectionBeforeReturning.shouldFail())) {
+ log() << "Hit pauseShardCollectionBeforeReturning";
+ pauseShardCollectionBeforeReturning.pauseWhileSet(opCtx);
+ }
+
scopedShardCollection.emplaceUUID(uuid);
}
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 099c667bb41..fd7e9313148 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -46,7 +46,8 @@ namespace mongo {
const NamespaceString ChunkType::ConfigNS("config.chunks");
const std::string ChunkType::ShardNSPrefix = "config.cache.chunks.";
-const BSONField<std::string> ChunkType::name("_id");
+const BSONField<OID> ChunkType::name("_id");
+const BSONField<std::string> ChunkType::legacyName("_id");
const BSONField<BSONObj> ChunkType::minShardID("_id");
const BSONField<std::string> ChunkType::ns("ns");
const BSONField<BSONObj> ChunkType::min("min");
@@ -214,12 +215,18 @@ StatusWith<ChunkType> ChunkType::parseFromConfigBSONCommand(const BSONObj& sourc
ChunkType chunk;
{
- std::string chunkID;
- Status status = bsonExtractStringField(source, name.name(), &chunkID);
+ OID chunkID;
+ Status status = bsonExtractOIDField(source, name.name(), &chunkID);
if (status.isOK()) {
chunk._id = chunkID;
} else if (status == ErrorCodes::NoSuchKey || status == ErrorCodes::TypeMismatch) {
- // ID status is missing or of type objectid, so we just ignore it.
+ // Ignore NoSuchKey because when chunks are sent in commands they are not required to
+ // include it.
+ //
+ // Ignore TypeMismatch for compatibility with binaries 4.2 and earlier, since the _id
+ // type was changed from string to OID.
+ //
+ // TODO SERVER-44034: Stop ignoring TypeMismatch.
} else {
return status;
}
@@ -300,12 +307,15 @@ StatusWith<ChunkType> ChunkType::fromConfigBSON(const BSONObj& source) {
if (!chunk._id) {
{
- std::string chunkID;
- Status status = bsonExtractStringField(source, name.name(), &chunkID);
+ OID chunkID;
+ Status status = bsonExtractOIDField(source, name.name(), &chunkID);
if (status.isOK()) {
chunk._id = chunkID;
} else if (status == ErrorCodes::TypeMismatch) {
- // ID status is of type objectid, so we just ignore it.
+ // The format of _id changed between 4.2 and 4.4 so for compatibility with chunks
+ // created in earlier versions we ignore TypeMismatch.
+ //
+ // TODO SERVER-44034: Stop ignoring TypeMismatch.
} else {
return status;
}
@@ -335,6 +345,26 @@ BSONObj ChunkType::toConfigBSON() const {
return builder.obj();
}
+BSONObj ChunkType::toConfigBSONLegacyID() const {
+ BSONObjBuilder builder;
+ if (_nss && _min)
+ builder.append(name.name(), genLegacyID(*_nss, *_min));
+ if (_nss)
+ builder.append(ns.name(), getNS().ns());
+ if (_min)
+ builder.append(min.name(), getMin());
+ if (_max)
+ builder.append(max.name(), getMax());
+ if (_shard)
+ builder.append(shard.name(), getShard().toString());
+ if (_version)
+ _version->appendLegacyWithField(&builder, ChunkType::lastmod());
+ if (_jumbo)
+ builder.append(jumbo.name(), getJumbo());
+ addHistoryToBSON(builder);
+ return builder.obj();
+}
+
StatusWith<ChunkType> ChunkType::fromShardBSON(const BSONObj& source, const OID& epoch) {
ChunkType chunk;
@@ -412,17 +442,13 @@ BSONObj ChunkType::toShardBSON() const {
return builder.obj();
}
-std::string ChunkType::getName() const {
- invariant(_id);
+const OID& ChunkType::getName() const {
+ uassert(51264, "Chunk name is not set", _id);
return *_id;
}
-void ChunkType::setName(const std::string& id) {
- _id = id;
-}
-
void ChunkType::setName(const OID& id) {
- _id = id.toString();
+ _id = id;
}
void ChunkType::setNS(const NamespaceString& nss) {
@@ -524,4 +550,22 @@ std::string ChunkType::toString() const {
return toConfigBSON().toString();
}
+std::string ChunkType::genLegacyID(const NamespaceString& nss, const BSONObj& o) {
+ StringBuilder buf;
+ buf << nss.ns() << "-";
+
+ BSONObjIterator i(o);
+ while (i.more()) {
+ BSONElement e = i.next();
+ buf << e.fieldName() << "_" << e.toString(false, true);
+ }
+
+ return buf.str();
+}
+
+std::string ChunkType::getLegacyName() const {
+ invariant(_nss && _min);
+ return genLegacyID(*_nss, *_min);
+}
+
} // namespace mongo
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index f517cc65c38..ce5526b01b0 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -183,7 +183,8 @@ public:
static const std::string ShardNSPrefix;
// Field names and types in the chunks collections.
- static const BSONField<std::string> name;
+ static const BSONField<OID> name;
+ static const BSONField<std::string> legacyName; // TODO SERVER-44034: Remove legacyName.
static const BSONField<BSONObj> minShardID;
static const BSONField<std::string> ns;
static const BSONField<BSONObj> min;
@@ -214,6 +215,14 @@ public:
BSONObj toConfigBSON() const;
/**
+ * Returns the BSON representation of the entry for the config server's config.chunks
+ * collection using the _id format expected by binaries in 4.2 and earlier.
+ *
+ * TODO SERVER-44034: Remove when 4.4 becomes last-stable.
+ */
+ BSONObj toConfigBSONLegacyID() const;
+
+ /**
* Constructs a new ChunkType object from BSON that has a shard server's config.chunks.<epoch>
* collection format.
*
@@ -222,19 +231,28 @@ public:
static StatusWith<ChunkType> fromShardBSON(const BSONObj& source, const OID& epoch);
/**
+ * Generates the chunk id that would be expected in binaries 4.2 and earlier based on the
+ * namespace and lower chunk bound.
+ *
+ * TODO SERVER-44034: Remove when 4.4 becomes last-stable.
+ */
+ static std::string genLegacyID(const NamespaceString& nss, const BSONObj& o);
+
+ /**
* Returns the BSON representation of the entry for a shard server's config.chunks.<epoch>
* collection.
*/
BSONObj toShardBSON() const;
- std::string getName() const;
- void setName(const OID& id);
-
/**
- * TODO SERVER-42299: Remove this method once _id is stored as an OID on disk instead of as a
- * string.
+ * Returns the _id that would be used for this chunk in binaries 4.2 and earlier.
+ *
+ * TODO SERVER-44034: Remove when 4.4 becomes last-stable.
*/
- void setName(const std::string& id);
+ std::string getLegacyName() const;
+
+ const OID& getName() const;
+ void setName(const OID& id);
/**
* Getters and setters.
@@ -303,7 +321,7 @@ private:
// Convention: (M)andatory, (O)ptional, (S)pecial; (C)onfig, (S)hard.
// (M)(C) auto-generated object id
- boost::optional<std::string> _id;
+ boost::optional<OID> _id;
// (O)(C) collection this chunk is in
boost::optional<NamespaceString> _nss;
// (M)(C)(S) first key of the range, inclusive
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index 3c424f815cc..aeabb3749c5 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -50,7 +50,7 @@ TEST(ChunkType, MissingConfigRequiredFields) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj objModNS =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
<< "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
<< chunkVersion.epoch() << ChunkType::shard("shard0001"));
@@ -58,14 +58,14 @@ TEST(ChunkType, MissingConfigRequiredFields) {
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModKeys =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
chunkRes = ChunkType::fromConfigBSON(objModKeys);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModShard =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch());
@@ -73,7 +73,7 @@ TEST(ChunkType, MissingConfigRequiredFields) {
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModVersion =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001"));
chunkRes = ChunkType::fromConfigBSON(objModVersion);
@@ -130,7 +130,7 @@ TEST(ChunkType, ToFromShardBSON) {
TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
@@ -142,7 +142,7 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("b" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
@@ -154,7 +154,7 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
TEST(ChunkType, MinToMaxNotAscending) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
- BSON(ChunkType::name(OID::gen().toString())
+ BSON(ChunkType::name(OID::gen())
<< ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 20))
<< ChunkType::max(BSON("a" << 10)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
@@ -163,7 +163,7 @@ TEST(ChunkType, MinToMaxNotAscending) {
}
TEST(ChunkType, ToFromConfigBSON) {
- const std::string chunkID = OID::gen().toString();
+ const auto chunkID = OID::gen();
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
BSON(ChunkType::name(chunkID)
@@ -267,5 +267,69 @@ TEST(ChunkRange, MinGreaterThanMaxShouldError) {
ASSERT_EQ(ErrorCodes::FailedToParse, parseStatus.getStatus());
}
+// TODO SERVER-44034: Delete this test.
+TEST(ChunkType, FromConfigBSONParsesIgnores42_idFormat) {
+ NamespaceString nss("test.mycol");
+ auto minBound = BSON("a" << 10);
+ ChunkVersion chunkVersion(1, 2, OID::gen());
+
+ BSONObj obj = BSON("_id" << ChunkType::genLegacyID(nss, minBound) << ChunkType::ns(nss.ns())
+ << ChunkType::min(minBound) << ChunkType::max(BSON("a" << 20))
+ << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
+ << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+
+ // Parsing will succeed despite the string _id.
+ auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(obj));
+
+ // Attempting to get the 4.4 _id will throw since it hasn't been set.
+ ASSERT_THROWS_CODE(chunk.getName(), AssertionException, 51264);
+}
+
+// TODO SERVER-44034: Delete this test.
+TEST(ChunkType, LegacyNameBSONFieldIs_id) {
+ auto obj = BSON(ChunkType::legacyName("dummyId"));
+ ASSERT_BSONOBJ_EQ(obj,
+ BSON("_id"
+ << "dummyId"));
+}
+
+// TODO SERVER-44034: Delete this test.
+TEST(ChunkType, GetLegacyNameAndGenLegacyIDReturn42_idFormat) {
+ NamespaceString nss("test.mycol");
+ auto minBound = BSON("a" << 10);
+ ChunkVersion chunkVersion(1, 2, OID::gen());
+
+ BSONObj obj =
+ BSON(ChunkType::name(OID::gen())
+ << ChunkType::ns(nss.ns()) << ChunkType::min(minBound)
+ << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(obj));
+
+ ASSERT_EQ("test.mycol-a_10", ChunkType::genLegacyID(nss, minBound));
+ ASSERT_EQ(ChunkType::genLegacyID(nss, minBound), chunk.getLegacyName());
+}
+
+// TODO SERVER-44034: Delete this test.
+TEST(ChunkType, ToConfigBSONLegacyIDUses42_idFormat) {
+ NamespaceString nss("test.mycol");
+ auto minBound = BSON("a" << 10);
+ ChunkVersion chunkVersion(1, 2, OID::gen());
+
+ BSONObj obj =
+ BSON(ChunkType::name(OID::gen())
+ << ChunkType::ns(nss.ns()) << ChunkType::min(minBound)
+ << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ auto chunk = uassertStatusOK(ChunkType::fromConfigBSON(obj));
+
+ ASSERT_BSONOBJ_EQ(chunk.toConfigBSONLegacyID(),
+ BSON("_id" << ChunkType::genLegacyID(nss, minBound)
+ << ChunkType::ns("test.mycol") << ChunkType::min(minBound)
+ << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001")
+ << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
+ << chunkVersion.epoch()));
+}
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/s/catalog/type_tags.cpp b/src/mongo/s/catalog/type_tags.cpp
index 4f949c5b754..9f21a2a5dfa 100644
--- a/src/mongo/s/catalog/type_tags.cpp
+++ b/src/mongo/s/catalog/type_tags.cpp
@@ -147,6 +147,13 @@ BSONObj TagsType::toBSON() const {
return builder.obj();
}
+BSONObj TagsType::toBSONLegacyID() const {
+ // Note that toBSON() doesn't append an _id.
+ BSONObjBuilder bob(toBSON());
+ bob.append("_id", BSON(TagsType::ns(_ns->ns()) << TagsType::min(*_minKey)));
+ return bob.obj();
+}
+
std::string TagsType::toString() const {
return toBSON().toString();
}
diff --git a/src/mongo/s/catalog/type_tags.h b/src/mongo/s/catalog/type_tags.h
index d69d9eeb057..6e1f3e33dc1 100644
--- a/src/mongo/s/catalog/type_tags.h
+++ b/src/mongo/s/catalog/type_tags.h
@@ -81,6 +81,14 @@ public:
BSONObj toBSON() const;
/**
+ * Returns the BSON representation of the tag with an _id in the format expected by binaries 4.2
+ * and below.
+ *
+ * TODO SERVER-44034: Remove this method.
+ */
+ BSONObj toBSONLegacyID() const;
+
+ /**
* Returns a std::string representation of the current internal state.
*/
std::string toString() const;
diff --git a/src/mongo/s/catalog/type_tags_test.cpp b/src/mongo/s/catalog/type_tags_test.cpp
index 1cd8ed6d276..e78f6eedeea 100644
--- a/src/mongo/s/catalog/type_tags_test.cpp
+++ b/src/mongo/s/catalog/type_tags_test.cpp
@@ -129,4 +129,18 @@ TEST(TagsType, BadType) {
ASSERT_EQUALS(ErrorCodes::NoSuchKey, status.getStatus());
}
+TEST(TagsType, ToBSONLegacyID) {
+ BSONObj obj =
+ BSON(TagsType::ns("test.mycol") << TagsType::tag("tag") << TagsType::min(BSON("a" << 10))
+ << TagsType::max(BSON("a" << 20)));
+
+ auto tag = uassertStatusOK(TagsType::fromBSON(obj));
+
+ ASSERT_BSONOBJ_EQ(tag.toBSONLegacyID(),
+ BSON(TagsType::ns("test.mycol")
+ << TagsType::tag("tag") << TagsType::min(BSON("a" << 10))
+ << TagsType::max(BSON("a" << 20)) << "_id"
+ << BSON(TagsType::ns("test.mycol") << TagsType::min(BSON("a" << 10)))));
+}
+
} // namespace