author      Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2018-06-20 19:34:16 -0400
committer   Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2018-06-22 14:04:18 -0400
commit      5dabb36c0b930f03f8da5cc1b572f3edcf4cff58 (patch)
tree        8360063842d5e14cc82b21d370dcde8499df10b9 /src/mongo/db/s
parent      cfb0b9ff7ca1a20c3c37edd2baf6e5c509c65aca (diff)
download    mongo-5dabb36c0b930f03f8da5cc1b572f3edcf4cff58.tar.gz
SERVER-35691 Cleanup the ChunkVersion serialization/deserialization code
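For readers skimming the hunks below: the commit renames the ChunkVersion serialization helpers into a smaller, more uniform set — appendWithField/parseWithField for the "command" wire format and appendLegacyWithField/parseLegacyWithField for the legacy config.chunks-style format, plus appendToCommand/parseFromCommand for the shardVersion command field. The following is a minimal sketch of the new call shapes, based only on the calls visible in this diff; the round-trip helper functions and the exact BSON layouts are illustrative, not part of the tree.

    #include "mongo/bson/bsonobjbuilder.h"
    #include "mongo/s/chunk_version.h"
    #include "mongo/util/assert_util.h"

    namespace mongo {

    // "Command" format, e.g. the chunkVersion field of a migration document or a
    // command reply. Old names: appendWithFieldForCommands / parseFromBSONWithFieldForCommands.
    BSONObj commandFormatRoundTrip(const ChunkVersion& version) {
        BSONObjBuilder builder;
        version.appendWithField(&builder, "chunkVersion");
        BSONObj obj = builder.obj();
        ChunkVersion parsed =
            uassertStatusOK(ChunkVersion::parseWithField(obj, "chunkVersion"));
        invariant(parsed == version);
        return obj;
    }

    // "Legacy" format, e.g. the lastmod field of a config.chunks document.
    // Old names: appendForChunk, addToBSON / ChunkVersion::fromBSON.
    BSONObj legacyFormatRoundTrip(const ChunkVersion& version) {
        BSONObjBuilder builder;
        version.appendLegacyWithField(&builder, "lastmod");
        BSONObj obj = builder.obj();
        ChunkVersion parsed =
            uassertStatusOK(ChunkVersion::parseLegacyWithField(obj, "lastmod"));
        invariant(parsed == version);
        return obj;
    }

    }  // namespace mongo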
Diffstat (limited to 'src/mongo/db/s')
-rw-r--r--  src/mongo/db/s/balancer/migration_manager_test.cpp                              |  2
-rw-r--r--  src/mongo/db/s/balancer/scoped_migration_request_test.cpp                       |  2
-rw-r--r--  src/mongo/db/s/balancer/type_migration.cpp                                      |  5
-rw-r--r--  src/mongo/db/s/balancer/type_migration_test.cpp                                 | 16
-rw-r--r--  src/mongo/db/s/collection_metadata.cpp                                          |  4
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_collection_command.cpp                    |  2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp            | 31
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp | 14
-rw-r--r--  src/mongo/db/s/operation_sharding_state.cpp                                     | 12
-rw-r--r--  src/mongo/db/s/set_shard_version_command.cpp                                    | 35
-rw-r--r--  src/mongo/db/s/shard_metadata_util.cpp                                          |  2
-rw-r--r--  src/mongo/db/s/shard_metadata_util_test.cpp                                     |  4
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.cpp                            |  4
13 files changed, 61 insertions, 72 deletions
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index 5aae83503a2..4f83a2a5487 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -218,7 +218,7 @@ void MigrationManagerTest::setUpMigration(const ChunkType& chunk, const ShardId&
builder.append(MigrationType::max(), chunk.getMax());
builder.append(MigrationType::toShard(), toShard.toString());
builder.append(MigrationType::fromShard(), chunk.getShard().toString());
- chunk.getVersion().appendWithFieldForCommands(&builder, "chunkVersion");
+ chunk.getVersion().appendWithField(&builder, "chunkVersion");
MigrationType migrationType = assertGet(MigrationType::fromBSON(builder.obj()));
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
diff --git a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
index 1d9b7ba6c7d..b72534b31a0 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
@@ -98,7 +98,7 @@ MigrateInfo makeMigrateInfo() {
chunkBuilder.append(ChunkType::ns(), kNs);
chunkBuilder.append(ChunkType::min(), kMin);
chunkBuilder.append(ChunkType::max(), kMax);
- kChunkVersion.appendForChunk(&chunkBuilder);
+ kChunkVersion.appendLegacyWithField(&chunkBuilder, ChunkType::lastmod());
chunkBuilder.append(ChunkType::shard(), kFromShard.toString());
ChunkType chunkType = assertGet(ChunkType::fromConfigBSON(chunkBuilder.obj()));
diff --git a/src/mongo/db/s/balancer/type_migration.cpp b/src/mongo/db/s/balancer/type_migration.cpp
index e6341b77685..bcd5f8ffe56 100644
--- a/src/mongo/db/s/balancer/type_migration.cpp
+++ b/src/mongo/db/s/balancer/type_migration.cpp
@@ -99,8 +99,7 @@ StatusWith<MigrationType> MigrationType::fromBSON(const BSONObj& source) {
}
{
- auto chunkVersionStatus =
- ChunkVersion::parseFromBSONWithFieldForCommands(source, kChunkVersion);
+ auto chunkVersionStatus = ChunkVersion::parseWithField(source, kChunkVersion);
if (!chunkVersionStatus.isOK())
return chunkVersionStatus.getStatus();
migrationType._chunkVersion = chunkVersionStatus.getValue();
@@ -130,7 +129,7 @@ BSONObj MigrationType::toBSON() const {
builder.append(fromShard.name(), _fromShard.toString());
builder.append(toShard.name(), _toShard.toString());
- _chunkVersion.appendWithFieldForCommands(&builder, kChunkVersion);
+ _chunkVersion.appendWithField(&builder, kChunkVersion);
builder.append(waitForDelete.name(), _waitForDelete);
return builder.obj();
diff --git a/src/mongo/db/s/balancer/type_migration_test.cpp b/src/mongo/db/s/balancer/type_migration_test.cpp
index 8d5d7cd07db..4b14d8cd018 100644
--- a/src/mongo/db/s/balancer/type_migration_test.cpp
+++ b/src/mongo/db/s/balancer/type_migration_test.cpp
@@ -56,7 +56,7 @@ TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
chunkBuilder.append(ChunkType::ns(), kNs);
chunkBuilder.append(ChunkType::min(), kMin);
chunkBuilder.append(ChunkType::max(), kMax);
- version.appendForChunk(&chunkBuilder);
+ version.appendLegacyWithField(&chunkBuilder, ChunkType::lastmod());
chunkBuilder.append(ChunkType::shard(), kFromShard.toString());
ChunkType chunkType = assertGet(ChunkType::fromConfigBSON(chunkBuilder.obj()));
@@ -72,7 +72,7 @@ TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
builder.append(MigrationType::max(), kMax);
builder.append(MigrationType::fromShard(), kFromShard.toString());
builder.append(MigrationType::toShard(), kToShard.toString());
- version.appendWithFieldForCommands(&builder, "chunkVersion");
+ version.appendWithField(&builder, "chunkVersion");
builder.append(MigrationType::waitForDelete(), kWaitForDelete);
BSONObj obj = builder.obj();
@@ -90,7 +90,7 @@ TEST(MigrationTypeTest, FromAndToBSON) {
builder.append(MigrationType::max(), kMax);
builder.append(MigrationType::fromShard(), kFromShard.toString());
builder.append(MigrationType::toShard(), kToShard.toString());
- version.appendWithFieldForCommands(&builder, "chunkVersion");
+ version.appendWithField(&builder, "chunkVersion");
builder.append(MigrationType::waitForDelete(), kWaitForDelete);
BSONObj obj = builder.obj();
@@ -107,7 +107,7 @@ TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
builder.append(MigrationType::max(), kMax);
builder.append(MigrationType::fromShard(), kFromShard.toString());
builder.append(MigrationType::toShard(), kToShard.toString());
- version.appendWithFieldForCommands(&builder, "chunkVersion");
+ version.appendWithField(&builder, "chunkVersion");
BSONObj obj = builder.obj();
@@ -124,7 +124,7 @@ TEST(MigrationTypeTest, MissingRequiredMinField) {
builder.append(MigrationType::max(), kMax);
builder.append(MigrationType::fromShard(), kFromShard.toString());
builder.append(MigrationType::toShard(), kToShard.toString());
- version.appendWithFieldForCommands(&builder, "chunkVersion");
+ version.appendWithField(&builder, "chunkVersion");
BSONObj obj = builder.obj();
@@ -141,7 +141,7 @@ TEST(MigrationTypeTest, MissingRequiredMaxField) {
builder.append(MigrationType::min(), kMin);
builder.append(MigrationType::fromShard(), kFromShard.toString());
builder.append(MigrationType::toShard(), kToShard.toString());
- version.appendWithFieldForCommands(&builder, "chunkVersion");
+ version.appendWithField(&builder, "chunkVersion");
BSONObj obj = builder.obj();
@@ -158,7 +158,7 @@ TEST(MigrationTypeTest, MissingRequiredFromShardField) {
builder.append(MigrationType::min(), kMin);
builder.append(MigrationType::max(), kMax);
builder.append(MigrationType::toShard(), kToShard.toString());
- version.appendWithFieldForCommands(&builder, "chunkVersion");
+ version.appendWithField(&builder, "chunkVersion");
BSONObj obj = builder.obj();
@@ -175,7 +175,7 @@ TEST(MigrationTypeTest, MissingRequiredToShardField) {
builder.append(MigrationType::min(), kMin);
builder.append(MigrationType::max(), kMax);
builder.append(MigrationType::fromShard(), kFromShard.toString());
- version.appendWithFieldForCommands(&builder, "chunkVersion");
+ version.appendWithField(&builder, "chunkVersion");
BSONObj obj = builder.obj();
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index 7e4b140d940..fa2455ee981 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -95,8 +95,8 @@ Status CollectionMetadata::checkChunkIsValid(const ChunkType& chunk) const {
}
void CollectionMetadata::toBSONBasic(BSONObjBuilder& bb) const {
- _cm->getVersion().addToBSON(bb, "collVersion");
- getShardVersion().addToBSON(bb, "shardVersion");
+ _cm->getVersion().appendLegacyWithField(&bb, "collVersion");
+ getShardVersion().appendLegacyWithField(&bb, "shardVersion");
bb.append("keyPattern", _cm->getShardKeyPattern().toBSON());
}
diff --git a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
index 3663903cb6b..432c28fc442 100644
--- a/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_collection_command.cpp
@@ -202,7 +202,7 @@ private:
// not sharded. Collections residing on the config server are never sharded so do not
// send the shard version.
if (shardId != shardRegistry->getConfigShard()->getId()) {
- ChunkVersion::UNSHARDED().appendForCommands(&builder);
+ ChunkVersion::UNSHARDED().appendToCommand(&builder);
}
if (!opCtx->getWriteConcern().usedDefault) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index f7734383e6f..bdbc7226e19 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -60,12 +60,13 @@ MONGO_FAIL_POINT_DEFINE(migrationCommitVersionError);
/**
* Append min, max and version information from chunk to the buffer for logChange purposes.
*/
-void appendShortVersion(BufBuilder* b, const ChunkType& chunk) {
- BSONObjBuilder bb(*b);
+void appendShortVersion(BufBuilder* out, const ChunkType& chunk) {
+ BSONObjBuilder bb(*out);
bb.append(ChunkType::min(), chunk.getMin());
bb.append(ChunkType::max(), chunk.getMax());
- if (chunk.isVersionSet())
- chunk.getVersion().addToBSON(bb, ChunkType::lastmod());
+ if (chunk.isVersionSet()) {
+ chunk.getVersion().appendLegacyWithField(&bb, ChunkType::lastmod());
+ }
bb.done();
}
@@ -187,7 +188,7 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
BSONObjBuilder n(op.subobjStart("o"));
n.append(ChunkType::name(), ChunkType::genID(nss, migratedChunk.getMin()));
- migratedChunk.getVersion().addToBSON(n, ChunkType::lastmod());
+ migratedChunk.getVersion().appendLegacyWithField(&n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), migratedChunk.getMin());
n.append(ChunkType::max(), migratedChunk.getMax());
@@ -211,7 +212,7 @@ BSONObj makeCommitChunkTransactionCommand(const NamespaceString& nss,
BSONObjBuilder n(op.subobjStart("o"));
n.append(ChunkType::name(), ChunkType::genID(nss, controlChunk->getMin()));
- controlChunk->getVersion().addToBSON(n, ChunkType::lastmod());
+ controlChunk->getVersion().appendLegacyWithField(&n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), controlChunk->getMin());
n.append(ChunkType::max(), controlChunk->getMax());
@@ -300,7 +301,8 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
return {ErrorCodes::IllegalOperation, errmsg};
}
- ChunkVersion collVersion = ChunkVersion::fromBSON(chunksVector.front(), ChunkType::lastmod());
+ ChunkVersion collVersion = uassertStatusOK(
+ ChunkVersion::parseLegacyWithField(chunksVector.front(), ChunkType::lastmod()));
// Return an error if collection epoch does not match epoch of request.
if (collVersion.epoch() != requestEpoch) {
@@ -380,7 +382,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
// add the modified (new) chunk information as the update object
BSONObjBuilder n(op.subobjStart("o"));
n.append(ChunkType::name(), ChunkType::genID(nss, startKey));
- currentMaxVersion.addToBSON(n, ChunkType::lastmod());
+ currentMaxVersion.appendLegacyWithField(&n, ChunkType::lastmod());
n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), startKey);
n.append(ChunkType::max(), endKey);
@@ -445,7 +447,7 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
BSONObjBuilder b(logDetail.subobjStart("before"));
b.append(ChunkType::min(), range.getMin());
b.append(ChunkType::max(), range.getMax());
- collVersion.addToBSON(b, ChunkType::lastmod());
+ collVersion.appendLegacyWithField(&b, ChunkType::lastmod());
}
if (newChunks.size() == 2) {
@@ -516,7 +518,8 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
return {ErrorCodes::IllegalOperation,
"collection does not exist, isn't sharded, or has no chunks"};
- ChunkVersion collVersion = ChunkVersion::fromBSON(chunksVector.front(), ChunkType::lastmod());
+ ChunkVersion collVersion = uassertStatusOK(
+ ChunkVersion::parseLegacyWithField(chunksVector.front(), ChunkType::lastmod()));
// Return an error if epoch of chunk does not match epoch of request
if (collVersion.epoch() != requestEpoch) {
@@ -580,8 +583,8 @@ Status ShardingCatalogManager::commitChunkMerge(OperationContext* opCtx,
b.append(chunkToMerge.toConfigBSON());
}
}
- collVersion.addToBSON(logDetail, "prevShardVersion");
- mergeVersion.addToBSON(logDetail, "mergedVersion");
+ collVersion.appendLegacyWithField(&logDetail, "prevShardVersion");
+ mergeVersion.appendLegacyWithField(&logDetail, "mergedVersion");
Grid::get(opCtx)
->catalogClient()
@@ -751,9 +754,9 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
}
BSONObjBuilder result;
- newMigratedChunk.getVersion().appendWithFieldForCommands(&result, "migratedChunkVersion");
+ newMigratedChunk.getVersion().appendWithField(&result, "migratedChunkVersion");
if (controlChunk) {
- newControlChunk->getVersion().appendWithFieldForCommands(&result, "controlChunkVersion");
+ newControlChunk->getVersion().appendWithField(&result, "controlChunkVersion");
}
return result.obj();
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
index 901c6a5b104..8b68f86246e 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
@@ -104,11 +104,11 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandWithCtl) {
// Verify the versions returned match expected values.
BSONObj versions = resultBSON.getValue();
- auto mver = ChunkVersion::parseFromBSONWithFieldForCommands(versions, "migratedChunkVersion");
+ auto mver = ChunkVersion::parseWithField(versions, "migratedChunkVersion");
ASSERT_OK(mver.getStatus());
ASSERT_EQ(ChunkVersion(origMajorVersion + 1, 0, origVersion.epoch()), mver.getValue());
- auto cver = ChunkVersion::parseFromBSONWithFieldForCommands(versions, "controlChunkVersion");
+ auto cver = ChunkVersion::parseWithField(versions, "controlChunkVersion");
ASSERT_OK(cver.getStatus());
ASSERT_EQ(ChunkVersion(origMajorVersion + 1, 1, origVersion.epoch()), cver.getValue());
@@ -174,11 +174,11 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
// Verify the version returned matches expected value.
BSONObj versions = resultBSON.getValue();
- auto mver = ChunkVersion::parseFromBSONWithFieldForCommands(versions, "migratedChunkVersion");
+ auto mver = ChunkVersion::parseWithField(versions, "migratedChunkVersion");
ASSERT_OK(mver.getStatus());
ASSERT_EQ(ChunkVersion(origMajorVersion + 1, 0, origVersion.epoch()), mver.getValue());
- auto cver = ChunkVersion::parseFromBSONWithFieldForCommands(versions, "controlChunkVersion");
+ auto cver = ChunkVersion::parseWithField(versions, "controlChunkVersion");
ASSERT_NOT_OK(cver.getStatus());
// Verify the chunk ended up in the right shard, and version matches the value returned.
@@ -235,11 +235,11 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
// Verify the version returned matches expected value.
BSONObj versions = resultBSON.getValue();
- auto mver = ChunkVersion::parseFromBSONWithFieldForCommands(versions, "migratedChunkVersion");
+ auto mver = ChunkVersion::parseWithField(versions, "migratedChunkVersion");
ASSERT_OK(mver.getStatus());
ASSERT_EQ(ChunkVersion(origMajorVersion + 1, 0, origVersion.epoch()), mver.getValue());
- auto cver = ChunkVersion::parseFromBSONWithFieldForCommands(versions, "controlChunkVersion");
+ auto cver = ChunkVersion::parseWithField(versions, "controlChunkVersion");
ASSERT_NOT_OK(cver.getStatus());
// Verify the chunk ended up in the right shard, and version matches the value returned.
@@ -505,7 +505,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
// Verify the versions returned match expected values.
BSONObj versions = resultBSON.getValue();
- auto mver = ChunkVersion::parseFromBSONWithFieldForCommands(versions, "migratedChunkVersion");
+ auto mver = ChunkVersion::parseWithField(versions, "migratedChunkVersion");
ASSERT_OK(mver.getStatus());
ASSERT_EQ(ChunkVersion(origMajorVersion + 1, 0, origVersion.epoch()), mver.getValue());
diff --git a/src/mongo/db/s/operation_sharding_state.cpp b/src/mongo/db/s/operation_sharding_state.cpp
index 5f855ba10d2..5527d066453 100644
--- a/src/mongo/db/s/operation_sharding_state.cpp
+++ b/src/mongo/db/s/operation_sharding_state.cpp
@@ -75,17 +75,7 @@ void OperationShardingState::initializeClientRoutingVersions(NamespaceString nss
const auto shardVersionElem = cmdObj.getField(ChunkVersion::kShardVersionField);
if (!shardVersionElem.eoo()) {
- uassert(ErrorCodes::BadValue,
- str::stream() << "expected shardVersion element to be an array, got "
- << shardVersionElem,
- shardVersionElem.type() == BSONType::Array);
- const BSONArray versionArr(shardVersionElem.Obj());
-
- bool canParse;
- ChunkVersion shardVersion = ChunkVersion::fromBSON(versionArr, &canParse);
- uassert(ErrorCodes::BadValue,
- str::stream() << "could not parse shardVersion from field " << versionArr,
- canParse);
+ auto shardVersion = uassertStatusOK(ChunkVersion::parseFromCommand(cmdObj));
if (nss.isSystemDotIndexes()) {
_shardVersions[nss.ns()] = ChunkVersion::IGNORED();
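The hunk above is the most visible simplification in this change: the hand-rolled array-type check plus ChunkVersion::fromBSON(..., &canParse) is folded into a single StatusWith-returning helper. A sketch of the resulting call-site pattern, assuming only what the new code in this hunk shows; the wrapper function below is illustrative and not part of the tree.

    #include <boost/optional.hpp>

    #include "mongo/bson/bsonelement.h"
    #include "mongo/bson/bsonobj.h"
    #include "mongo/s/chunk_version.h"
    #include "mongo/util/assert_util.h"

    namespace mongo {

    // Mirrors the new call site in operation_sharding_state.cpp: the presence check
    // stays with the caller, while parsing and validation live in parseFromCommand,
    // which reads the ChunkVersion::kShardVersionField element from the command object.
    boost::optional<ChunkVersion> extractClientShardVersion(const BSONObj& cmdObj) {
        const auto shardVersionElem = cmdObj.getField(ChunkVersion::kShardVersionField);
        if (shardVersionElem.eoo()) {
            return boost::none;
        }
        // Throws via uassert with the parse error if the element is malformed.
        return uassertStatusOK(ChunkVersion::parseFromCommand(cmdObj));
    }

    }  // namespace mongo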
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index 967332fabc2..acfd347345c 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -48,14 +48,11 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
+#include "mongo/s/request_types/set_shard_version_request.h"
#include "mongo/util/log.h"
#include "mongo/util/stringutils.h"
namespace mongo {
-
-using std::string;
-using str::stream;
-
namespace {
class SetShardVersion : public ErrmsgCommandDeprecated {
@@ -89,7 +86,7 @@ public:
bool errmsgRun(OperationContext* opCtx,
const std::string&,
const BSONObj& cmdObj,
- string& errmsg,
+ std::string& errmsg,
BSONObjBuilder& result) {
uassert(ErrorCodes::IllegalOperation,
"can't issue setShardVersion from 'eval'",
@@ -197,8 +194,8 @@ public:
nss.isValid());
// Validate chunk version parameter.
- const ChunkVersion requestedVersion =
- uassertStatusOK(ChunkVersion::parseFromBSONForSetShardVersion(cmdObj));
+ const ChunkVersion requestedVersion = uassertStatusOK(
+ ChunkVersion::parseLegacyWithField(cmdObj, SetShardVersionRequest::kVersion));
// Step 4
@@ -210,7 +207,7 @@ public:
// as UNSHARDED is the legacy way to achieve this purpose.
const auto connectionVersion =
(connectionVersionOrNotSet ? *connectionVersionOrNotSet : ChunkVersion::UNSHARDED());
- connectionVersion.addToBSON(result, "oldVersion");
+ connectionVersion.appendLegacyWithField(&result, "oldVersion");
{
boost::optional<AutoGetDb> autoDb;
@@ -273,7 +270,7 @@ public:
if (!authoritative) {
result.appendBool("need_authoritative", true);
result.append("ns", nss.ns());
- collectionShardVersion.addToBSON(result, "globalVersion");
+ collectionShardVersion.appendLegacyWithField(&result, "globalVersion");
errmsg = "dropping needs to be authoritative";
return false;
}
@@ -290,8 +287,8 @@ public:
errmsg = str::stream() << "this connection already had a newer version "
<< "of collection '" << nss.ns() << "'";
result.append("ns", nss.ns());
- requestedVersion.addToBSON(result, "newVersion");
- collectionShardVersion.addToBSON(result, "globalVersion");
+ requestedVersion.appendLegacyWithField(&result, "newVersion");
+ collectionShardVersion.appendLegacyWithField(&result, "globalVersion");
return false;
}
@@ -310,8 +307,8 @@ public:
errmsg = str::stream() << "shard global version for collection is higher "
<< "than trying to set to '" << nss.ns() << "'";
result.append("ns", nss.ns());
- requestedVersion.addToBSON(result, "version");
- collectionShardVersion.addToBSON(result, "globalVersion");
+ requestedVersion.appendLegacyWithField(&result, "version");
+ collectionShardVersion.appendLegacyWithField(&result, "globalVersion");
result.appendBool("reloadConfig", true);
return false;
}
@@ -367,8 +364,8 @@ public:
warning() << errmsg;
result.append("ns", nss.ns());
- requestedVersion.addToBSON(result, "version");
- currVersion.addToBSON(result, "globalVersion");
+ requestedVersion.appendLegacyWithField(&result, "version");
+ currVersion.appendLegacyWithField(&result, "globalVersion");
result.appendBool("reloadConfig", true);
return false;
@@ -386,7 +383,7 @@ public:
// version reload.
result.append("ns", nss.ns());
- currVersion.addToBSON(result, "globalVersion");
+ currVersion.appendLegacyWithField(&result, "globalVersion");
// If this was a reset of a collection or the last chunk moved out, inform mongos to
// do a full reload.
@@ -394,11 +391,11 @@ public:
result.appendBool("reloadConfig", true);
// Zero-version also needed to trigger full mongos reload, sadly
// TODO: Make this saner, and less impactful (full reload on last chunk is bad)
- ChunkVersion(0, 0, OID()).addToBSON(result, "version");
+ ChunkVersion(0, 0, OID()).appendLegacyWithField(&result, "version");
// For debugging
- requestedVersion.addToBSON(result, "origVersion");
+ requestedVersion.appendLegacyWithField(&result, "origVersion");
} else {
- requestedVersion.addToBSON(result, "version");
+ requestedVersion.appendLegacyWithField(&result, "version");
}
return false;
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index b5669bdc004..28384a70a85 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -366,7 +366,7 @@ Status updateShardChunks(OperationContext* opCtx,
*
*/
for (auto& chunk : chunks) {
- invariant(chunk.getVersion().hasEqualEpoch(currEpoch));
+ invariant(chunk.getVersion().epoch() == currEpoch);
// Delete any overlapping chunk ranges. Overlapping chunks will have a min value
// ("_id") between (chunk.min, chunk.max].
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index 8c650e17531..b6a397f26df 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -287,7 +287,7 @@ TEST_F(ShardMetadataUtilTest, UpdateWithWriteNewChunks) {
subMax.append("a", 10000);
}
splitChunkOneBuilder.append(ChunkType::shard(), lastChunk.getShard().toString());
- collVersion.appendForChunk(&splitChunkOneBuilder);
+ collVersion.appendLegacyWithField(&splitChunkOneBuilder, ChunkType::lastmod());
ChunkType splitChunkOne =
assertGet(ChunkType::fromShardBSON(splitChunkOneBuilder.obj(), collVersion.epoch()));
newChunks.push_back(splitChunkOne);
@@ -301,7 +301,7 @@ TEST_F(ShardMetadataUtilTest, UpdateWithWriteNewChunks) {
}
splitChunkTwoMovedBuilder.append(ChunkType::max(), lastChunk.getMax());
splitChunkTwoMovedBuilder.append(ChunkType::shard(), "altShard");
- collVersion.appendForChunk(&splitChunkTwoMovedBuilder);
+ collVersion.appendLegacyWithField(&splitChunkTwoMovedBuilder, ChunkType::lastmod());
ChunkType splitChunkTwoMoved =
assertGet(ChunkType::fromShardBSON(splitChunkTwoMovedBuilder.obj(), collVersion.epoch()));
newChunks.push_back(splitChunkTwoMoved);
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index c07cc18b8b5..38b6d97da1b 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -1141,7 +1141,7 @@ void ShardServerCatalogCacheLoader::CollAndChunkTaskList::addTask(collAndChunkTa
}
if (task.dropped) {
- invariant(_tasks.back().maxQueryVersion.equals(task.minQueryVersion));
+ invariant(_tasks.back().maxQueryVersion == task.minQueryVersion);
// As an optimization, on collection drop, clear any pending tasks in order to prevent any
// throw-away work from executing. Because we have no way to differentiate whether the
@@ -1155,7 +1155,7 @@ void ShardServerCatalogCacheLoader::CollAndChunkTaskList::addTask(collAndChunkTa
}
} else {
// Tasks must have contiguous versions, unless a complete reload occurs.
- invariant(_tasks.back().maxQueryVersion.equals(task.minQueryVersion) ||
+ invariant(_tasks.back().maxQueryVersion == task.minQueryVersion ||
!task.minQueryVersion.isSet());
_tasks.emplace_back(std::move(task));