summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--jstests/sharding/initial_split_validate_shard_collections.js1
-rw-r--r--jstests/sharding/store_historical_placement_data.js8
-rw-r--r--src/mongo/db/s/collection_metadata_filtering_test.cpp12
-rw-r--r--src/mongo/db/s/collection_metadata_test.cpp16
-rw-r--r--src/mongo/db/s/config/initial_split_policy.cpp3
-rw-r--r--src/mongo/db/s/config/initial_split_policy_test.cpp3
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp3
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp12
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp64
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp4
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp54
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp12
-rw-r--r--src/mongo/db/s/sharding_ddl_util_test.cpp12
-rw-r--r--src/mongo/s/catalog/type_chunk.cpp30
-rw-r--r--src/mongo/s/catalog/type_chunk.h16
-rw-r--r--src/mongo/s/catalog/type_chunk_test.cpp57
-rw-r--r--src/mongo/s/chunk_manager_query_test.cpp3
-rw-r--r--src/mongo/s/chunk_test.cpp21
18 files changed, 253 insertions, 78 deletions
diff --git a/jstests/sharding/initial_split_validate_shard_collections.js b/jstests/sharding/initial_split_validate_shard_collections.js
index 75b417577ab..9d483e2670b 100644
--- a/jstests/sharding/initial_split_validate_shard_collections.js
+++ b/jstests/sharding/initial_split_validate_shard_collections.js
@@ -66,7 +66,6 @@ const chunksOnShard0 = st.shard0.getDB("config").getCollection(chunksCollName).f
const chunksOnShard1 = st.shard1.getDB("config").getCollection(chunksCollName).find().toArray();
assert.eq(chunksOnConfigCount, chunksOnShard0.length);
assert.eq(chunksOnConfigCount, chunksOnShard1.length);
-assert.eq(chunksOnShard0, chunksOnShard1);
st.stop();
})();
diff --git a/jstests/sharding/store_historical_placement_data.js b/jstests/sharding/store_historical_placement_data.js
index 019a8f13b0a..80e57d23def 100644
--- a/jstests/sharding/store_historical_placement_data.js
+++ b/jstests/sharding/store_historical_placement_data.js
@@ -130,6 +130,8 @@ function testMoveChunk(dbName, collName) {
let migratedChunk = configDB.chunks.findOne({uuid: collUUID, min: {x: MinKey}});
assert(timestampCmp(placementAfterMigration.timestamp, migratedChunk.history[0].validAfter) ===
0);
+ assert(timestampCmp(placementAfterMigration.timestamp, migratedChunk.onCurrentShardSince) ===
+ 0);
// Move out the last chunk from shard0 to shard2 - a new placement entry should appear, where
// the donor has been removed and the recipient inserted
@@ -139,6 +141,8 @@ function testMoveChunk(dbName, collName) {
migratedChunk = configDB.chunks.findOne({uuid: collUUID, min: {x: 0}});
assert(timestampCmp(placementAfterMigration.timestamp, migratedChunk.history[0].validAfter) ===
0);
+ assert(timestampCmp(placementAfterMigration.timestamp, migratedChunk.onCurrentShardSince) ===
+ 0);
// Create a third chunk in shard1, then move it to shard2: since this migration does not alter
// the subset of shards owning collection data, no new record should be inserted
@@ -168,6 +172,8 @@ function testMoveRange(dbName, collName) {
let migratedChunk = configDB.chunks.findOne({uuid: collUUID, min: {x: MinKey}});
assert(timestampCmp(placementAfterMigration.timestamp, migratedChunk.history[0].validAfter) ===
0);
+ assert(timestampCmp(placementAfterMigration.timestamp, migratedChunk.onCurrentShardSince) ===
+ 0);
// Move the other half to shard 1 -> shard 0 should be removed from the placement data
assert.commandWorked(
@@ -176,6 +182,8 @@ function testMoveRange(dbName, collName) {
migratedChunk = configDB.chunks.findOne({uuid: collUUID, min: {x: 0}});
assert(timestampCmp(placementAfterMigration.timestamp, migratedChunk.history[0].validAfter) ===
0);
+ assert(timestampCmp(placementAfterMigration.timestamp, migratedChunk.onCurrentShardSince) ===
+ 0);
}
function testMovePrimary(dbName, fromPrimaryShardName, toPrimaryShardName) {
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index 1ec5b6be31f..0226a423f75 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -83,17 +83,20 @@ protected:
{shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
version,
{"0"});
- chunk1.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("0")),
+ chunk1.setOnCurrentShardSince(Timestamp(75, 0));
+ chunk1.setHistory({ChunkHistory(*chunk1.getOnCurrentShardSince(), ShardId("0")),
ChunkHistory(Timestamp(25, 0), ShardId("1"))});
version.incMinor();
ChunkType chunk2(uuid, {BSON("_id" << -100), BSON("_id" << 0)}, version, {"1"});
- chunk2.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("1")),
+ chunk2.setOnCurrentShardSince(Timestamp(75, 0));
+ chunk2.setHistory({ChunkHistory(*chunk2.getOnCurrentShardSince(), ShardId("1")),
ChunkHistory(Timestamp(25, 0), ShardId("0"))});
version.incMinor();
ChunkType chunk3(uuid, {BSON("_id" << 0), BSON("_id" << 100)}, version, {"0"});
- chunk3.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("0")),
+ chunk3.setOnCurrentShardSince(Timestamp(75, 0));
+ chunk3.setHistory({ChunkHistory(*chunk3.getOnCurrentShardSince(), ShardId("0")),
ChunkHistory(Timestamp(25, 0), ShardId("1"))});
version.incMinor();
@@ -101,7 +104,8 @@ protected:
{BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
version,
{"1"});
- chunk4.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("1")),
+ chunk4.setOnCurrentShardSince(Timestamp(75, 0));
+ chunk4.setHistory({ChunkHistory(*chunk4.getOnCurrentShardSince(), ShardId("1")),
ChunkHistory(Timestamp(25, 0), ShardId("0"))});
version.incMinor();
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 3c576765f4c..c1cf8022901 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -54,7 +54,7 @@ CollectionMetadata makeCollectionMetadataImpl(
const OID epoch = OID::gen();
- const Timestamp kRouting(100, 0);
+ const Timestamp kOnCurrentShardSince(100, 0);
const Timestamp kChunkManager(staleChunkManager ? 99 : 100, 0);
std::vector<ChunkType> allChunks;
@@ -65,12 +65,18 @@ CollectionMetadata makeCollectionMetadataImpl(
// Need to add a chunk to the other shard from nextMinKey to myNextChunk.first.
allChunks.emplace_back(
uuid, ChunkRange{nextMinKey, myNextChunk.first}, version, kOtherShard);
- allChunks.back().setHistory({ChunkHistory(kRouting, kOtherShard)});
+ auto& chunk = allChunks.back();
+ chunk.setOnCurrentShardSince(kOnCurrentShardSince);
+ chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), chunk.getShard())});
+
version.incMajor();
}
allChunks.emplace_back(
uuid, ChunkRange{myNextChunk.first, myNextChunk.second}, version, kThisShard);
- allChunks.back().setHistory({ChunkHistory(kRouting, kThisShard)});
+ auto& chunk = allChunks.back();
+ chunk.setOnCurrentShardSince(kOnCurrentShardSince);
+ chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), chunk.getShard())});
+
version.incMajor();
nextMinKey = myNextChunk.second;
}
@@ -78,7 +84,9 @@ CollectionMetadata makeCollectionMetadataImpl(
if (SimpleBSONObjComparator::kInstance.evaluate(nextMinKey < shardKeyPattern.globalMax())) {
allChunks.emplace_back(
uuid, ChunkRange{nextMinKey, shardKeyPattern.globalMax()}, version, kOtherShard);
- allChunks.back().setHistory({ChunkHistory(kRouting, kOtherShard)});
+ auto& chunk = allChunks.back();
+ chunk.setOnCurrentShardSince(kOnCurrentShardSince);
+ chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), chunk.getShard())});
}
return CollectionMetadata(
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 791d39061d5..482cb34bacc 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -76,7 +76,8 @@ void appendChunk(const SplitPolicyParams& params,
std::vector<ChunkType>* chunks) {
chunks->emplace_back(params.collectionUUID, ChunkRange(min, max), *version, shardId);
auto& chunk = chunks->back();
- chunk.setHistory({ChunkHistory(version->getTimestamp(), shardId)});
+ chunk.setOnCurrentShardSince(version->getTimestamp());
+ chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), shardId)});
version->incMinor();
}
diff --git a/src/mongo/db/s/config/initial_split_policy_test.cpp b/src/mongo/db/s/config/initial_split_policy_test.cpp
index cae1f09d826..444763175c1 100644
--- a/src/mongo/db/s/config/initial_split_policy_test.cpp
+++ b/src/mongo/db/s/config/initial_split_policy_test.cpp
@@ -207,7 +207,8 @@ public:
for (unsigned long i = 0; i < chunkRanges.size(); ++i) {
ChunkVersion version({OID::gen(), Timestamp(1, 1)}, {1, 0});
ChunkType chunk(_uuid, chunkRanges[i], version, shardIds[i]);
- chunk.setHistory({ChunkHistory(timeStamp, shardIds[i])});
+ chunk.setOnCurrentShardSince(timeStamp);
+ chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), shardIds[i])});
chunks.push_back(chunk);
}
return chunks;
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp
index e86036a43b3..e2788ee86e6 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp
@@ -86,7 +86,8 @@ protected:
chunkType.setShard(shardId);
chunkType.setMin(minKey);
chunkType.setMax(maxKey);
- chunkType.setHistory({ChunkHistory(Timestamp(100, 0), shardId)});
+ chunkType.setOnCurrentShardSince(Timestamp(100, 0));
+ chunkType.setHistory({ChunkHistory(*chunkType.getOnCurrentShardSince(), shardId)});
return chunkType;
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index a9e62c5a472..cd63a41f319 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -721,7 +721,9 @@ void ShardingCatalogManager::_mergeChunksInTransaction(
mergedChunk.setVersion(mergeVersion);
mergedChunk.setEstimatedSizeBytes(boost::none);
- mergedChunk.setHistory({ChunkHistory(validAfter, mergedChunk.getShard())});
+ mergedChunk.setOnCurrentShardSince(validAfter);
+ mergedChunk.setHistory({ChunkHistory(*mergedChunk.getOnCurrentShardSince(),
+ mergedChunk.getShard())});
entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(
mergedChunk.toConfigBSON()));
@@ -1125,7 +1127,9 @@ ShardingCatalogManager::commitChunkMigration(OperationContext* opCtx,
<< " is greater or equal to the new validAfter "
<< validAfter.value().toString()};
}
- newHistory.emplace(newHistory.begin(), ChunkHistory(validAfter.value(), toShard));
+ newMigratedChunk->setOnCurrentShardSince(validAfter.value());
+ newHistory.emplace(newHistory.begin(),
+ ChunkHistory(*newMigratedChunk->getOnCurrentShardSince(), toShard));
newMigratedChunk->setHistory(std::move(newHistory));
std::shared_ptr<std::vector<ChunkType>> newSplitChunks =
@@ -1315,7 +1319,9 @@ void ShardingCatalogManager::upgradeChunksHistory(OperationContext* opCtx,
changedShardIds.emplace(upgradeChunk.getShard());
// Construct the fresh history.
- upgradeChunk.setHistory({ChunkHistory{validAfter, upgradeChunk.getShard()}});
+ upgradeChunk.setOnCurrentShardSince(validAfter);
+ upgradeChunk.setHistory(
+ {ChunkHistory{*upgradeChunk.getOnCurrentShardSince(), upgradeChunk.getShard()}});
// Set the 'historyIsAt40' field so that it gets skipped if the command is re-run
BSONObjBuilder chunkObjBuilder(upgradeChunk.toConfigBSON());
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
index 2b627a53e3d..c4eb59fa688 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp
@@ -107,7 +107,9 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
migratedChunk.setCollectionUUID(collUUID);
migratedChunk.setVersion(origVersion);
migratedChunk.setShard(shard0.getName());
- migratedChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
+ migratedChunk.setOnCurrentShardSince(Timestamp(100, 0));
+ migratedChunk.setHistory(
+ {ChunkHistory(*migratedChunk.getOnCurrentShardSince(), shard0.getName())});
migratedChunk.setMin(BSON("a" << 1));
migratedChunk.setMax(BSON("a" << 10));
@@ -117,7 +119,9 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
controlChunk.setCollectionUUID(collUUID);
controlChunk.setVersion(origVersion);
controlChunk.setShard(shard0.getName());
- controlChunk.setHistory({ChunkHistory(Timestamp(50, 0), shard0.getName())});
+ controlChunk.setOnCurrentShardSince(Timestamp(50, 0));
+ controlChunk.setHistory(
+ {ChunkHistory(*controlChunk.getOnCurrentShardSince(), shard0.getName())});
controlChunk.setMin(BSON("a" << 10));
controlChunk.setMax(BSON("a" << 20));
controlChunk.setJumbo(true);
@@ -155,6 +159,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
// The migrated chunk's history should be updated.
ASSERT_EQ(2UL, chunkDoc0.getHistory().size());
ASSERT_EQ(validAfter, chunkDoc0.getHistory().front().getValidAfter());
+ ASSERT_EQ(validAfter, *chunkDoc0.getOnCurrentShardSince());
auto chunkDoc1 = uassertStatusOK(
getChunkDoc(operationContext(), controlChunk.getMin(), collEpoch, collTimestamp));
@@ -166,6 +171,8 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) {
chunkDoc1.getHistory().front().getValidAfter());
ASSERT_EQ(controlChunk.getHistory().front().getShard(),
chunkDoc1.getHistory().front().getShard());
+ ASSERT(chunkDoc1.getOnCurrentShardSince().has_value());
+ ASSERT_EQ(controlChunk.getOnCurrentShardSince(), chunkDoc1.getOnCurrentShardSince());
ASSERT(chunkDoc1.getJumbo());
}
@@ -192,7 +199,8 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
chunk0.setCollectionUUID(collUUID);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
- chunk0.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
+ chunk0.setOnCurrentShardSince(Timestamp(100, 0));
+ chunk0.setHistory({ChunkHistory(*chunk0.getOnCurrentShardSince(), shard0.getName())});
// apportion
auto chunkMin = BSON("a" << 1);
@@ -233,6 +241,7 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) {
// The history should be updated.
ASSERT_EQ(2UL, chunkDoc0.getHistory().size());
ASSERT_EQ(validAfter, chunkDoc0.getHistory().front().getValidAfter());
+ ASSERT_EQ(validAfter, *chunkDoc0.getOnCurrentShardSince());
}
TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
@@ -258,7 +267,8 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
chunk0.setCollectionUUID(collUUID);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
- chunk0.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
+ chunk0.setOnCurrentShardSince(Timestamp(100, 0));
+ chunk0.setHistory({ChunkHistory(*chunk0.getOnCurrentShardSince(), shard0.getName())});
// apportion
auto chunkMin = BSON("a" << 1);
@@ -297,6 +307,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) {
// The new history entry should be added, but the old one preserved.
ASSERT_EQ(2UL, chunkDoc0.getHistory().size());
ASSERT_EQ(validAfter, chunkDoc0.getHistory().front().getValidAfter());
+ ASSERT_EQ(validAfter, *chunkDoc0.getOnCurrentShardSince());
}
TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
@@ -320,7 +331,8 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) {
chunk0.setCollectionUUID(collUUID);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
- chunk0.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
+ chunk0.setOnCurrentShardSince(Timestamp(100, 0));
+ chunk0.setHistory({ChunkHistory(*chunk0.getOnCurrentShardSince(), shard0.getName())});
// apportion
auto chunkMin = BSON("a" << 1);
@@ -484,7 +496,8 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
chunk0.setCollectionUUID(collUUID);
chunk0.setVersion(origVersion);
chunk0.setShard(shard0.getName());
- chunk0.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
+ chunk0.setOnCurrentShardSince(Timestamp(100, 0));
+ chunk0.setHistory({ChunkHistory(*chunk0.getOnCurrentShardSince(), shard0.getName())});
// apportion
auto chunkMin = BSON("a" << 1);
@@ -503,7 +516,8 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
chunk1.setMax(chunkMaxax);
Timestamp ctrlChunkValidAfter = Timestamp(50, 0);
- chunk1.setHistory({ChunkHistory(ctrlChunkValidAfter, shard1.getName())});
+ chunk1.setOnCurrentShardSince(ctrlChunkValidAfter);
+ chunk1.setHistory({ChunkHistory(*chunk1.getOnCurrentShardSince(), shard1.getName())});
setupCollection(kNamespace, kKeyPattern, {chunk0, chunk1});
@@ -534,6 +548,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
// The migrated chunk's history should be updated.
ASSERT_EQ(2UL, chunkDoc0.getHistory().size());
ASSERT_EQ(validAfter, chunkDoc0.getHistory().front().getValidAfter());
+ ASSERT_EQ(validAfter, *chunkDoc0.getOnCurrentShardSince());
auto chunkDoc1 =
uassertStatusOK(getChunkDoc(operationContext(), chunkMax, collEpoch, collTimestamp));
@@ -543,6 +558,7 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks)
// The control chunk's history should be unchanged.
ASSERT_EQ(1UL, chunkDoc1.getHistory().size());
ASSERT_EQ(ctrlChunkValidAfter, chunkDoc1.getHistory().front().getValidAfter());
+ ASSERT_EQ(ctrlChunkValidAfter, *chunkDoc1.getOnCurrentShardSince());
}
TEST_F(CommitChunkMigrate, RejectMissingChunkVersion) {
@@ -565,7 +581,9 @@ TEST_F(CommitChunkMigrate, RejectMissingChunkVersion) {
migratedChunk.setName(OID::gen());
migratedChunk.setCollectionUUID(collUUID);
migratedChunk.setShard(shard0.getName());
- migratedChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
+ migratedChunk.setOnCurrentShardSince(Timestamp(100, 0));
+ migratedChunk.setHistory(
+ {ChunkHistory(*migratedChunk.getOnCurrentShardSince(), shard0.getName())});
migratedChunk.setMin(BSON("a" << 1));
migratedChunk.setMax(BSON("a" << 10));
@@ -574,7 +592,9 @@ TEST_F(CommitChunkMigrate, RejectMissingChunkVersion) {
currentChunk.setCollectionUUID(collUUID);
currentChunk.setVersion(origVersion);
currentChunk.setShard(shard0.getName());
- currentChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
+ currentChunk.setOnCurrentShardSince(Timestamp(100, 0));
+ currentChunk.setHistory(
+ {ChunkHistory(*currentChunk.getOnCurrentShardSince(), shard0.getName())});
currentChunk.setMin(BSON("a" << 1));
currentChunk.setMax(BSON("a" << 10));
@@ -615,7 +635,9 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) {
migratedChunk.setCollectionUUID(collUUID);
migratedChunk.setVersion(origVersion);
migratedChunk.setShard(shard0.getName());
- migratedChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
+ migratedChunk.setOnCurrentShardSince(Timestamp(100, 0));
+ migratedChunk.setHistory(
+ {ChunkHistory(*migratedChunk.getOnCurrentShardSince(), shard0.getName())});
migratedChunk.setMin(BSON("a" << 1));
migratedChunk.setMax(BSON("a" << 10));
@@ -626,7 +648,9 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) {
currentChunk.setCollectionUUID(collUUID);
currentChunk.setVersion(currentChunkVersion);
currentChunk.setShard(shard0.getName());
- currentChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
+ currentChunk.setOnCurrentShardSince(Timestamp(100, 0));
+ currentChunk.setHistory(
+ {ChunkHistory(*currentChunk.getOnCurrentShardSince(), shard0.getName())});
currentChunk.setMin(BSON("a" << 1));
currentChunk.setMax(BSON("a" << 10));
@@ -667,7 +691,9 @@ TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) {
migratedChunk.setCollectionUUID(collUUID);
migratedChunk.setVersion(origVersion);
migratedChunk.setShard(shard0.getName());
- migratedChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
+ migratedChunk.setOnCurrentShardSince(Timestamp(100, 0));
+ migratedChunk.setHistory(
+ {ChunkHistory(*migratedChunk.getOnCurrentShardSince(), shard0.getName())});
migratedChunk.setMin(BSON("a" << 1));
migratedChunk.setMax(BSON("a" << 10));
@@ -678,7 +704,9 @@ TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) {
currentChunk.setCollectionUUID(collUUID);
currentChunk.setVersion(currentChunkVersion);
currentChunk.setShard(shard0.getName());
- currentChunk.setHistory({ChunkHistory(Timestamp(100, 0), shard0.getName())});
+ currentChunk.setOnCurrentShardSince(Timestamp(100, 0));
+ currentChunk.setHistory(
+ {ChunkHistory(*currentChunk.getOnCurrentShardSince(), shard0.getName())});
currentChunk.setMin(BSON("a" << 1));
currentChunk.setMax(BSON("a" << 10));
@@ -716,6 +744,8 @@ public:
chunk.setVersion(version);
chunk.setShard(shardID);
chunk.setHistory(history);
+ if (!history.empty())
+ chunk.setOnCurrentShardSince(history.front().getValidAfter());
chunk.setMin(min);
chunk.setMax(max);
@@ -731,7 +761,6 @@ public:
uint32_t currentMajorVersion = 1;
int historyTimestampSecond = 100;
- std::vector<ChunkHistory> history;
std::vector<BSONObj> chunksMin = {kKeyPattern.globalMin()};
for (int i = 10; i < numberOfChunks * 10; i += 10) {
chunksMin.push_back(BSON("x" << i));
@@ -744,8 +773,8 @@ public:
const auto shardId = _shardIds.at(i % 2); // Shard owning the chunk
ChunkVersion version =
ChunkVersion({_collEpoch, _collTimestamp}, {currentMajorVersion++, 0});
- history.insert(history.begin(),
- {ChunkHistory(Timestamp(historyTimestampSecond++, 0), shardId)});
+ std::vector<ChunkHistory> history{
+ ChunkHistory(Timestamp(historyTimestampSecond++, 0), shardId)};
ChunkType chunk = createChunk(_collUUID, min, max, version, shardId, history);
chunks.push_back(chunk);
}
@@ -811,6 +840,7 @@ public:
// The migrated chunk's history should have been updated with a new `validAfter` entry
ASSERT_EQ(origChunk.getHistory().size() + 1, newChunk.getHistory().size());
ASSERT_EQ(validAfter, newChunk.getHistory().front().getValidAfter());
+ ASSERT_EQ(validAfter, *newChunk.getOnCurrentShardSince());
// The migrated chunk's history must inherit the previous chunk's history
assertSameHistories(std::vector<ChunkHistory>(newChunk.getHistory().begin() + 1,
@@ -827,6 +857,7 @@ public:
migratedChunk.getVersion().epoch(),
migratedChunk.getVersion().getTimestamp()));
ASSERT_EQ(donor, leftSplitChunk.getShard());
+ ASSERT_EQ(origChunk.getOnCurrentShardSince(), leftSplitChunk.getOnCurrentShardSince());
// The min of the split chunk must be the min of the original chunk
ASSERT(leftSplitChunk.getMin().woCompare(origChunk.getMin()) == 0);
@@ -851,6 +882,7 @@ public:
migratedChunk.getVersion().epoch(),
migratedChunk.getVersion().getTimestamp()));
ASSERT_EQ(donor, rightSplitChunk.getShard());
+ ASSERT_EQ(origChunk.getOnCurrentShardSince(), rightSplitChunk.getOnCurrentShardSince());
// The min of the right split chunk must fit the max of the new chunk
ASSERT(rightSplitChunk.getMin().woCompare(migratedChunk.getMax()) == 0);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
index 8921d0c2e8b..e0cf9bd774e 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp
@@ -61,7 +61,8 @@ ChunkType generateChunkType(const NamespaceString& nss,
chunkType.setShard(shardId);
chunkType.setMin(minKey);
chunkType.setMax(maxKey);
- chunkType.setHistory({ChunkHistory(Timestamp(100, 0), shardId)});
+ chunkType.setOnCurrentShardSince(Timestamp(100, 0));
+ chunkType.setHistory({ChunkHistory(*chunkType.getOnCurrentShardSince(), shardId)});
return chunkType;
}
@@ -87,6 +88,7 @@ void assertChunkVersionWasBumpedTo(const ChunkType& chunkTypeBefore,
ASSERT_BSONOBJ_EQ(chunkTypeBefore.getMin(), chunkTypeAfter.getMin());
ASSERT_BSONOBJ_EQ(chunkTypeBefore.getMax(), chunkTypeAfter.getMax());
ASSERT(chunkTypeBefore.getHistory() == chunkTypeAfter.getHistory());
+ ASSERT_EQ(chunkTypeBefore.getOnCurrentShardSince(), chunkTypeAfter.getOnCurrentShardSince());
}
TEST_F(EnsureChunkVersionIsGreaterThanTest, IfNoCollectionFoundReturnsSuccess) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index 2b898f5cdca..61a545cc767 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -92,8 +92,10 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
chunk2.setName(OID::gen());
// set histories
- chunk.setHistory({ChunkHistory{Timestamp{100, 0}, _shardId}});
- chunk2.setHistory({ChunkHistory{Timestamp{200, 0}, _shardId}});
+ chunk.setOnCurrentShardSince(Timestamp{100, 0});
+ chunk2.setOnCurrentShardSince(Timestamp{200, 0});
+ chunk.setHistory({ChunkHistory{*chunk.getOnCurrentShardSince(), _shardId}});
+ chunk2.setHistory({ChunkHistory{*chunk2.getOnCurrentShardSince(), _shardId}});
// set boundaries
auto chunkMin = BSON("a" << 1);
@@ -181,9 +183,12 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
chunk3.setName(OID::gen());
// set histories
- chunk.setHistory({ChunkHistory{Timestamp{100, 10}, _shardId}});
- chunk2.setHistory({ChunkHistory{Timestamp{200, 1}, _shardId}});
- chunk3.setHistory({ChunkHistory{Timestamp{50, 0}, _shardId}});
+ chunk.setOnCurrentShardSince(Timestamp{100, 10});
+ chunk2.setOnCurrentShardSince(Timestamp{200, 1});
+ chunk3.setOnCurrentShardSince(Timestamp{50, 0});
+ chunk.setHistory({ChunkHistory{*chunk.getOnCurrentShardSince(), _shardId}});
+ chunk2.setHistory({ChunkHistory{*chunk2.getOnCurrentShardSince(), _shardId}});
+ chunk3.setHistory({ChunkHistory{*chunk3.getOnCurrentShardSince(), _shardId}});
auto chunkMin = BSON("a" << 1);
auto chunkBound = BSON("a" << 5);
@@ -242,6 +247,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
ASSERT_EQ(1UL, mergedChunk.getHistory().size());
ASSERT_EQ(chunk2.getHistory().front().getValidAfter(),
mergedChunk.getHistory().front().getValidAfter());
+ ASSERT_EQ(chunk2.getOnCurrentShardSince(), mergedChunk.getOnCurrentShardSince());
}
TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
@@ -263,8 +269,10 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
chunk2.setName(OID::gen());
// set histories
- chunk.setHistory({ChunkHistory{Timestamp{100, 0}, _shardId}});
- chunk2.setHistory({ChunkHistory{Timestamp{200, 0}, _shardId}});
+ chunk.setOnCurrentShardSince(Timestamp{100, 0});
+ chunk2.setOnCurrentShardSince(Timestamp{200, 0});
+ chunk.setHistory({ChunkHistory{*chunk.getOnCurrentShardSince(), _shardId}});
+ chunk2.setHistory({ChunkHistory{*chunk2.getOnCurrentShardSince(), _shardId}});
auto chunkMin = BSON("a" << 1);
auto chunkBound = BSON("a" << 5);
@@ -328,6 +336,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
ASSERT_EQ(1UL, mergedChunk.getHistory().size());
ASSERT_EQ(chunk2.getHistory().front().getValidAfter(),
mergedChunk.getHistory().front().getValidAfter());
+ ASSERT_EQ(chunk2.getOnCurrentShardSince(), mergedChunk.getOnCurrentShardSince());
}
TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
@@ -348,8 +357,10 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
chunk2.setName(OID::gen());
// set histories
- chunk.setHistory({ChunkHistory{Timestamp{100, 5}, shardId}});
- chunk2.setHistory({ChunkHistory{Timestamp{200, 1}, shardId}});
+ chunk.setOnCurrentShardSince(Timestamp{100, 5});
+ chunk2.setOnCurrentShardSince(Timestamp{200, 1});
+ chunk.setHistory({ChunkHistory{*chunk.getOnCurrentShardSince(), shardId}});
+ chunk2.setHistory({ChunkHistory{*chunk2.getOnCurrentShardSince(), shardId}});
auto chunkMin = BSON("a" << 1);
auto chunkBound = BSON("a" << 5);
@@ -430,8 +441,10 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
auto chunk2(chunk);
// set history
- chunk.setHistory({ChunkHistory{Timestamp{100, 0}, _shardId}});
- chunk2.setHistory({ChunkHistory{Timestamp{200, 0}, _shardId}});
+ chunk.setOnCurrentShardSince(Timestamp{100, 0});
+ chunk2.setOnCurrentShardSince(Timestamp{200, 0});
+ chunk.setHistory({ChunkHistory{*chunk.getOnCurrentShardSince(), _shardId}});
+ chunk2.setHistory({ChunkHistory{*chunk2.getOnCurrentShardSince(), _shardId}});
auto chunkMin = BSON("a" << 1);
auto chunkBound = BSON("a" << 5);
@@ -474,8 +487,10 @@ TEST_F(MergeChunkTest, NonMatchingUUIDsOfChunkAndRequestErrors) {
auto chunk2(chunk);
// set histories
- chunk.setHistory({ChunkHistory{Timestamp{100, 0}, _shardId}});
- chunk2.setHistory({ChunkHistory{Timestamp{200, 0}, _shardId}});
+ chunk.setOnCurrentShardSince(Timestamp{100, 0});
+ chunk2.setOnCurrentShardSince(Timestamp{200, 0});
+ chunk.setHistory({ChunkHistory{*chunk.getOnCurrentShardSince(), _shardId}});
+ chunk2.setHistory({ChunkHistory{*chunk2.getOnCurrentShardSince(), _shardId}});
auto chunkMin = BSON("a" << 1);
auto chunkBound = BSON("a" << 5);
@@ -522,7 +537,8 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) {
mergedChunk.setName(OID::gen());
mergedChunk.setCollectionUUID(collUuid);
mergedChunk.setShard(_shardId);
- mergedChunk.setHistory({ChunkHistory{Timestamp{100, 0}, _shardId}});
+ mergedChunk.setOnCurrentShardSince(Timestamp{100, 0});
+ mergedChunk.setHistory({ChunkHistory{*mergedChunk.getOnCurrentShardSince(), _shardId}});
setupCollection(_nss1, _keyPattern, {mergedChunk});
@@ -582,9 +598,12 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
auto chunkMax = BSON("a" << kMaxBSONKey);
// set histories
- chunk1.setHistory({ChunkHistory{Timestamp{100, 9}, _shardId}});
- chunk2.setHistory({ChunkHistory{Timestamp{200, 5}, _shardId}});
- chunk3.setHistory({ChunkHistory{Timestamp{156, 1}, _shardId}});
+ chunk1.setOnCurrentShardSince(Timestamp{100, 9});
+ chunk2.setOnCurrentShardSince(Timestamp{200, 5});
+ chunk3.setOnCurrentShardSince(Timestamp{156, 1});
+ chunk1.setHistory({ChunkHistory{*chunk1.getOnCurrentShardSince(), _shardId}});
+ chunk2.setHistory({ChunkHistory{*chunk2.getOnCurrentShardSince(), _shardId}});
+ chunk3.setHistory({ChunkHistory{*chunk3.getOnCurrentShardSince(), _shardId}});
// first chunk boundaries
chunk1.setMin(chunkMin);
@@ -641,6 +660,7 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
ASSERT_EQ(1UL, mergedChunk.getHistory().size());
ASSERT_EQ(chunk2.getHistory().front().getValidAfter(),
mergedChunk.getHistory().front().getValidAfter());
+ ASSERT_EQ(chunk2.getOnCurrentShardSince(), mergedChunk.getOnCurrentShardSince());
}
} // namespace
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index ce0c871c611..eedddf232e9 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -88,7 +88,8 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
auto chunkMax = BSON("a" << 10);
chunk.setMin(chunkMin);
chunk.setMax(chunkMax);
- chunk.setHistory({ChunkHistory(Timestamp(100, 0), ShardId(_shardName)),
+ chunk.setOnCurrentShardSince(Timestamp(100, 0));
+ chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), ShardId(_shardName)),
ChunkHistory(Timestamp(90, 0), ShardId("shardY"))});
auto chunkSplitPoint = BSON("a" << 5);
@@ -150,6 +151,8 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
// Both chunks should have the same history
ASSERT(chunkDoc.getHistory() == otherChunkDoc.getHistory());
+ ASSERT(chunkDoc.getOnCurrentShardSince().has_value());
+ ASSERT_EQ(chunkDoc.getOnCurrentShardSince(), otherChunkDoc.getOnCurrentShardSince());
};
test(_nss2, Timestamp(42));
@@ -172,7 +175,8 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
auto chunkMax = BSON("a" << 10);
chunk.setMin(chunkMin);
chunk.setMax(chunkMax);
- chunk.setHistory({ChunkHistory(Timestamp(100, 0), ShardId(_shardName)),
+ chunk.setOnCurrentShardSince(Timestamp(100, 0));
+ chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), ShardId(_shardName)),
ChunkHistory(Timestamp(90, 0), ShardId("shardY"))});
auto chunkSplitPoint = BSON("a" << 5);
@@ -239,6 +243,10 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
// Both chunks should have the same history
ASSERT(chunkDoc.getHistory() == midChunkDoc.getHistory());
ASSERT(midChunkDoc.getHistory() == lastChunkDoc.getHistory());
+
+ ASSERT(chunkDoc.getOnCurrentShardSince().has_value());
+ ASSERT_EQ(chunkDoc.getOnCurrentShardSince(), midChunkDoc.getOnCurrentShardSince());
+ ASSERT_EQ(midChunkDoc.getOnCurrentShardSince(), lastChunkDoc.getOnCurrentShardSince());
};
test(_nss2, Timestamp(42));
diff --git a/src/mongo/db/s/sharding_ddl_util_test.cpp b/src/mongo/db/s/sharding_ddl_util_test.cpp
index 908668aa87b..a79e2d8b41a 100644
--- a/src/mongo/db/s/sharding_ddl_util_test.cpp
+++ b/src/mongo/db/s/sharding_ddl_util_test.cpp
@@ -128,7 +128,8 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
chunk.setCollectionUUID(collUUID);
chunk.setVersion(chunkVersion);
chunk.setShard(shard0.getName());
- chunk.setHistory({ChunkHistory(Timestamp(1, i), shard0.getName())});
+ chunk.setOnCurrentShardSince(Timestamp(1, i));
+ chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), shard0.getName())});
chunk.setMin(BSON("a" << i));
chunk.setMax(BSON("a" << i + 1));
chunks.push_back(chunk);
@@ -147,7 +148,8 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
chunk.setCollectionUUID(toUUID);
chunk.setVersion(chunkVersion);
chunk.setShard(shard0.getName());
- chunk.setHistory({ChunkHistory(Timestamp(1, i), shard0.getName())});
+ chunk.setOnCurrentShardSince(Timestamp(1, i));
+ chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), shard0.getName())});
chunk.setMin(BSON("a" << i));
chunk.setMax(BSON("a" << i + 1));
originalToChunks.push_back(chunk);
@@ -230,7 +232,8 @@ TEST_F(ShardingDDLUtilTest, RenamePreconditionsAreMet) {
chunk.setCollectionUUID(UUID::gen());
chunk.setVersion(chunkVersion);
chunk.setShard(shard0.getName());
- chunk.setHistory({ChunkHistory(Timestamp(1, 1), shard0.getName())});
+ chunk.setOnCurrentShardSince(Timestamp(1, 1));
+ chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), shard0.getName())});
chunk.setMin(kMinBSONKey);
chunk.setMax(kMaxBSONKey);
@@ -271,7 +274,8 @@ TEST_F(ShardingDDLUtilTest, RenamePreconditionsTargetCollectionExists) {
chunk.setCollectionUUID(UUID::gen());
chunk.setVersion(chunkVersion);
chunk.setShard(shard0.getName());
- chunk.setHistory({ChunkHistory(Timestamp(1, 1), shard0.getName())});
+ chunk.setOnCurrentShardSince(Timestamp(1, 1));
+ chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), shard0.getName())});
chunk.setMin(kMinBSONKey);
chunk.setMax(kMaxBSONKey);
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index b330697db0d..1d07942ecfd 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -60,6 +60,7 @@ const BSONField<bool> ChunkType::jumbo("jumbo");
const BSONField<Date_t> ChunkType::lastmod("lastmod");
const BSONField<BSONObj> ChunkType::history("history");
const BSONField<int64_t> ChunkType::estimatedSizeBytes("estimatedDataSizeBytes");
+const BSONField<Timestamp> ChunkType::onCurrentShardSince("onCurrentShardSince");
const BSONField<bool> ChunkType::historyIsAt40("historyIsAt40");
namespace {
@@ -238,6 +239,14 @@ StatusWith<ChunkType> ChunkType::_parseChunkBase(const BSONObj& source) {
}
{
+ Timestamp onCurrentShardSinceValue;
+ Status status = bsonExtractTimestampField(
+ source, onCurrentShardSince.name(), &onCurrentShardSinceValue);
+ chunk._onCurrentShardSince =
+ (status.isOK() ? (boost::optional<Timestamp>)onCurrentShardSinceValue : boost::none);
+ }
+
+ {
BSONElement historyObj;
Status status = bsonExtractTypedField(source, history.name(), Array, &historyObj);
if (status.isOK()) {
@@ -246,6 +255,8 @@ StatusWith<ChunkType> ChunkType::_parseChunkBase(const BSONObj& source) {
return history.getStatus();
chunk._history = std::move(history.getValue());
+ dassert(!chunk._onCurrentShardSince.has_value() ||
+ *chunk._onCurrentShardSince == chunk._history.front().getValidAfter());
} else if (status == ErrorCodes::NoSuchKey) {
// History is missing, so it will be presumed empty
} else {
@@ -470,6 +481,11 @@ BSONObj ChunkType::toConfigBSON() const {
static_cast<long long>(*_estimatedSizeBytes));
if (_jumbo)
builder.append(jumbo.name(), getJumbo());
+ if (_onCurrentShardSince) {
+ dassert(!_history.empty());
+ dassert(_history.front().getValidAfter() == *_onCurrentShardSince);
+ builder.append(onCurrentShardSince.name(), *_onCurrentShardSince);
+ }
addHistoryToBSON(builder);
return builder.obj();
}
@@ -484,6 +500,8 @@ BSONObj ChunkType::toShardBSON() const {
builder.append(max.name(), getMax());
builder.append(shard.name(), getShard().toString());
builder.appendTimestamp(lastmod.name(), _version->toLong());
+ if (_onCurrentShardSince)
+ builder.append(onCurrentShardSince.name(), *_onCurrentShardSince);
addHistoryToBSON(builder);
return builder.obj();
}
@@ -532,6 +550,10 @@ void ChunkType::setJumbo(bool jumbo) {
_jumbo = jumbo;
}
+void ChunkType::setOnCurrentShardSince(const Timestamp& onCurrentShardSince) {
+ _onCurrentShardSince = onCurrentShardSince;
+}
+
void ChunkType::addHistoryToBSON(BSONObjBuilder& builder) const {
if (_history.size()) {
BSONArrayBuilder arrayBuilder(builder.subarrayStart(history.name()));
@@ -590,6 +612,14 @@ Status ChunkType::validate() const {
str::stream() << "History contains an invalid shard "
<< _history.front().getShard()};
}
+ if (_onCurrentShardSince.has_value() &&
+ _history.front().getValidAfter() != *_onCurrentShardSince) {
+ return {ErrorCodes::BadValue,
+ str::stream() << "The first `validAfter` in the chunk's `history` is not "
+ "consistent with `onCurrentShardSince`: validAfter is "
+ << _history.front().getValidAfter()
+ << " while onCurrentShardSince is " << *_onCurrentShardSince};
+ }
}
return Status::OK();
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index c2797112686..06768cc40cb 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -214,6 +214,7 @@ public:
static const BSONField<Date_t> lastmod;
static const BSONField<BSONObj> history;
static const BSONField<int64_t> estimatedSizeBytes;
+ static const BSONField<Timestamp> onCurrentShardSince;
static const BSONField<bool> historyIsAt40;
ChunkType();
@@ -222,14 +223,14 @@ public:
/**
* Constructs a new ChunkType object from BSON with the following format:
* {min: <>, max: <>, shard: <>, uuid: <>, history: <>, jumbo: <>, lastmod: <>,
- * lastmodEpoch: <>, lastmodTimestamp: <>}
+ * lastmodEpoch: <>, lastmodTimestamp: <>, onCurrentShardSince: <>}
*/
static StatusWith<ChunkType> parseFromNetworkRequest(const BSONObj& source);
/**
* Constructs a new ChunkType object from BSON with the following format:
* {_id: <>, min: <>, max: <>, shard: <>, uuid: <>, history: <>, jumbo: <>, lastmod: <>,
- * estimatedSizeByte: <>}
+ * estimatedSizeBytes: <>, onCurrentShardSince: <>}
*
* Returns ErrorCodes::NoSuchKey if the '_id' field is missing
*/
@@ -239,7 +240,7 @@ public:
/**
* Constructs a new ChunkType object from BSON with the following format:
- * {_id: <>, max: <>, shard: <>, history: <>, lastmod: <>}
+ * {_id: <>, max: <>, shard: <>, history: <>, lastmod: <>, onCurrentShardSince: <>}
* Also does validation of the contents.
*/
static StatusWith<ChunkType> parseFromShardBSON(const BSONObj& source,
@@ -308,6 +309,11 @@ public:
}
void setJumbo(bool jumbo);
+ const boost::optional<Timestamp>& getOnCurrentShardSince() const {
+ return _onCurrentShardSince;
+ }
+ void setOnCurrentShardSince(const Timestamp& onCurrentShardSince);
+
void setHistory(std::vector<ChunkHistory> history) {
_history = std::move(history);
if (!_history.empty()) {
@@ -334,7 +340,7 @@ public:
private:
/**
* Parses the base chunk data on all usages:
- * {history: <>, shard: <>}
+ * {history: <>, shard: <>, onCurrentShardSince: <>}
*/
static StatusWith<ChunkType> _parseChunkBase(const BSONObj& source);
@@ -357,6 +363,8 @@ private:
boost::optional<int64_t> _estimatedSizeBytes;
// (O)(C) too big to move?
boost::optional<bool> _jumbo;
+ // (M)(C)(S) timestamp of when the chunk started belonging to its current shard
+ boost::optional<Timestamp> _onCurrentShardSince;
// history of the chunk
std::vector<ChunkHistory> _history;
};
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index 18c199b69ea..b7ad47a97fb 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -134,12 +134,17 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
const auto collTimestamp = Timestamp(1);
ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
+ const auto onCurrentShardSince = Timestamp(2);
BSONObj obj = BSON(
ChunkType::name(OID::gen())
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << "lastmodTimestamp"
- << chunkVersion.getTimestamp() << ChunkType::shard("shard0001"));
+ << chunkVersion.getTimestamp() << ChunkType::shard("shard0001")
+ << ChunkType::onCurrentShardSince() << onCurrentShardSince << ChunkType::history()
+ << BSON_ARRAY(BSON(ChunkHistoryBase::kValidAfterFieldName
+ << onCurrentShardSince << ChunkHistoryBase::kShardFieldName
+ << "shard0001")));
StatusWith<ChunkType> chunkRes = ChunkType::parseFromConfigBSON(obj, collEpoch, collTimestamp);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -151,12 +156,17 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
const auto collTimestamp = Timestamp(1);
ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
+ const auto onCurrentShardSince = Timestamp(2);
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("b" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << "lastmodTimestamp"
- << chunkVersion.getTimestamp() << ChunkType::shard("shard0001"));
+ << chunkVersion.getTimestamp() << ChunkType::shard("shard0001")
+ << ChunkType::onCurrentShardSince() << onCurrentShardSince << ChunkType::history()
+ << BSON_ARRAY(BSON(ChunkHistoryBase::kValidAfterFieldName
+ << onCurrentShardSince << ChunkHistoryBase::kShardFieldName
+ << "shard0001")));
StatusWith<ChunkType> chunkRes = ChunkType::parseFromConfigBSON(obj, collEpoch, collTimestamp);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -168,11 +178,16 @@ TEST(ChunkType, MinToMaxNotAscending) {
const auto collTimestamp = Timestamp(1);
ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
+ const auto onCurrentShardSince = Timestamp(2);
BSONObj obj =
BSON(ChunkType::name(OID::gen())
<< ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 20))
<< ChunkType::max(BSON("a" << 10)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001")
+ << ChunkType::onCurrentShardSince() << onCurrentShardSince << ChunkType::history()
+ << BSON_ARRAY(BSON(ChunkHistoryBase::kValidAfterFieldName
+ << onCurrentShardSince << ChunkHistoryBase::kShardFieldName
+ << "shard0001")));
StatusWith<ChunkType> chunkRes = ChunkType::parseFromConfigBSON(obj, collEpoch, collTimestamp);
ASSERT_EQ(ErrorCodes::FailedToParse, chunkRes.getStatus());
}
@@ -183,11 +198,17 @@ TEST(ChunkType, ToFromConfigBSON) {
const auto collTimestamp = Timestamp(1);
const auto chunkID = OID::gen();
+ const auto onCurrentShardSince = Timestamp(4);
ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
- BSONObj obj = BSON(ChunkType::name(chunkID)
- << ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001")
- << "lastmod" << Timestamp(chunkVersion.toLong()));
+ BSONObj obj =
+ BSON(ChunkType::name(chunkID)
+ << ChunkType::collectionUUID() << collUuid << ChunkType::min(BSON("a" << 10))
+ << ChunkType::max(BSON("a" << 20)) << ChunkType::shard("shard0001") << "lastmod"
+ << Timestamp(chunkVersion.toLong()) << ChunkType::onCurrentShardSince()
+ << onCurrentShardSince << ChunkType::history()
+ << BSON_ARRAY(BSON(ChunkHistoryBase::kValidAfterFieldName
+ << onCurrentShardSince << ChunkHistoryBase::kShardFieldName
+ << "shard0001")));
StatusWith<ChunkType> chunkRes = ChunkType::parseFromConfigBSON(obj, collEpoch, collTimestamp);
ASSERT_OK(chunkRes.getStatus());
ChunkType chunk = chunkRes.getValue();
@@ -201,6 +222,7 @@ TEST(ChunkType, ToFromConfigBSON) {
ASSERT_EQUALS(chunk.getVersion().toLong(), chunkVersion.toLong());
ASSERT_EQUALS(chunk.getVersion().epoch(), chunkVersion.epoch());
ASSERT_EQUALS(chunk.getShard(), "shard0001");
+ ASSERT_EQUALS(*chunk.getOnCurrentShardSince(), onCurrentShardSince);
ASSERT_OK(chunk.validate());
}
@@ -219,6 +241,7 @@ TEST(ChunkType, BothNsAndUUID) {
const auto collTimestamp = Timestamp(1);
ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
+ const auto onCurrentShardSince = Timestamp(2);
BSONObj objModNS =
BSON(ChunkType::name(OID::gen())
@@ -226,7 +249,11 @@ TEST(ChunkType, BothNsAndUUID) {
<< mongo::UUID::gen() << ChunkType::min(BSON("a" << 10 << "b" << 10))
<< ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
<< "lastmodEpoch" << chunkVersion.epoch() << "lastmodTimestamp"
- << chunkVersion.getTimestamp() << ChunkType::shard("shard0001"));
+ << chunkVersion.getTimestamp() << ChunkType::shard("shard0001")
+ << ChunkType::onCurrentShardSince() << onCurrentShardSince << ChunkType::history()
+ << BSON_ARRAY(BSON(ChunkHistoryBase::kValidAfterFieldName
+ << onCurrentShardSince << ChunkHistoryBase::kShardFieldName
+ << "shard0001")));
StatusWith<ChunkType> chunkRes =
ChunkType::parseFromConfigBSON(objModNS, collEpoch, collTimestamp);
ASSERT_TRUE(chunkRes.isOK());
@@ -237,13 +264,18 @@ TEST(ChunkType, UUIDPresentAndNsMissing) {
const auto collTimestamp = Timestamp(1);
ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
+ const auto onCurrentShardSince = Timestamp(2);
BSONObj objModNS = BSON(
ChunkType::name(OID::gen())
<< ChunkType::collectionUUID() << mongo::UUID::gen()
<< ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
<< "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch" << chunkVersion.epoch()
- << "lastmodTimestamp" << chunkVersion.getTimestamp() << ChunkType::shard("shard0001"));
+ << "lastmodTimestamp" << chunkVersion.getTimestamp() << ChunkType::shard("shard0001")
+ << ChunkType::onCurrentShardSince() << onCurrentShardSince << ChunkType::history()
+ << BSON_ARRAY(BSON(ChunkHistoryBase::kValidAfterFieldName
+ << onCurrentShardSince << ChunkHistoryBase::kShardFieldName
+ << "shard0001")));
StatusWith<ChunkType> chunkRes =
ChunkType::parseFromConfigBSON(objModNS, collEpoch, collTimestamp);
ASSERT_TRUE(chunkRes.isOK());
@@ -254,6 +286,7 @@ TEST(ChunkType, ParseFromNetworkRequest) {
const auto collTimestamp = Timestamp(1, 0);
ChunkVersion chunkVersion({collEpoch, collTimestamp}, {1, 2});
+ const auto onCurrentShardSince = Timestamp(2, 0);
auto chunk = assertGet(ChunkType::parseFromNetworkRequest(
BSON(ChunkType::name(OID::gen())
@@ -262,7 +295,11 @@ TEST(ChunkType, ParseFromNetworkRequest) {
<< "lastmod"
<< BSON("e" << chunkVersion.epoch() << "t" << chunkVersion.getTimestamp() << "v"
<< Timestamp(chunkVersion.toLong()))
- << ChunkType::shard("shard0001"))));
+ << ChunkType::shard("shard0001") << ChunkType::onCurrentShardSince()
+ << onCurrentShardSince << ChunkType::history()
+ << BSON_ARRAY(BSON(ChunkHistoryBase::kValidAfterFieldName
+ << onCurrentShardSince << ChunkHistoryBase::kShardFieldName
+ << "shard0001")))));
ASSERT_EQ("shard0001", chunk.getShard());
ASSERT_EQ(chunkVersion, chunk.getVersion());
diff --git a/src/mongo/s/chunk_manager_query_test.cpp b/src/mongo/s/chunk_manager_query_test.cpp
index 936175610f7..38030014adc 100644
--- a/src/mongo/s/chunk_manager_query_test.cpp
+++ b/src/mongo/s/chunk_manager_query_test.cpp
@@ -528,7 +528,8 @@ TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
version.incMajor();
chunk1.setVersion(version);
chunk1.setShard(chunk0.getShard());
- chunk1.setHistory({ChunkHistory(Timestamp(20, 0), ShardId("0")),
+ chunk1.setOnCurrentShardSince(Timestamp(20, 0));
+ chunk1.setHistory({ChunkHistory(*chunk1.getOnCurrentShardSince(), ShardId("0")),
ChunkHistory(Timestamp(1, 0), ShardId("1"))});
ChunkManager chunkManager(
diff --git a/src/mongo/s/chunk_test.cpp b/src/mongo/s/chunk_test.cpp
index 61ffc30f269..8031f58e5b2 100644
--- a/src/mongo/s/chunk_test.cpp
+++ b/src/mongo/s/chunk_test.cpp
@@ -47,8 +47,9 @@ TEST(ChunkTest, HasMovedSincePinnedTimestamp) {
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
version,
kShardOne);
- chunkType.setHistory(
- {ChunkHistory(Timestamp(101, 0), kShardOne), ChunkHistory(Timestamp(100, 0), kShardTwo)});
+ chunkType.setOnCurrentShardSince(Timestamp(101, 0));
+ chunkType.setHistory({ChunkHistory(*chunkType.getOnCurrentShardSince(), kShardOne),
+ ChunkHistory(Timestamp(100, 0), kShardTwo)});
ChunkInfo chunkInfo(chunkType);
Chunk chunk(chunkInfo, Timestamp(100, 0));
@@ -64,7 +65,8 @@ TEST(ChunkTest, HasMovedAndReturnedSincePinnedTimestamp) {
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
version,
kShardOne);
- chunkType.setHistory({ChunkHistory(Timestamp(102, 0), kShardOne),
+ chunkType.setOnCurrentShardSince(Timestamp(102, 0));
+ chunkType.setHistory({ChunkHistory(*chunkType.getOnCurrentShardSince(), kShardOne),
ChunkHistory(Timestamp(101, 0), kShardTwo),
ChunkHistory(Timestamp(100, 0), kShardOne)});
@@ -82,8 +84,9 @@ TEST(ChunkTest, HasNotMovedSincePinnedTimestamp) {
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
version,
kShardOne);
- chunkType.setHistory(
- {ChunkHistory(Timestamp(100, 0), kShardOne), ChunkHistory(Timestamp(99, 0), kShardTwo)});
+ chunkType.setOnCurrentShardSince(Timestamp(100, 0));
+ chunkType.setHistory({ChunkHistory(*chunkType.getOnCurrentShardSince(), kShardOne),
+ ChunkHistory(Timestamp(99, 0), kShardTwo)});
ChunkInfo chunkInfo(chunkType);
Chunk chunk(chunkInfo, Timestamp(100, 0));
@@ -100,7 +103,8 @@ TEST(ChunkTest, HasNoHistoryValidForPinnedTimestamp_OneEntry) {
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
version,
kShardOne);
- chunkType.setHistory({ChunkHistory(Timestamp(101, 0), kShardOne)});
+ chunkType.setOnCurrentShardSince(Timestamp(101, 0));
+ chunkType.setHistory({ChunkHistory(*chunkType.getOnCurrentShardSince(), kShardOne)});
ChunkInfo chunkInfo(chunkType);
Chunk chunk(chunkInfo, Timestamp(100, 0));
@@ -116,8 +120,9 @@ TEST(ChunkTest, HasNoHistoryValidForPinnedTimestamp_MoreThanOneEntry) {
ChunkRange{kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax()},
version,
kShardOne);
- chunkType.setHistory(
- {ChunkHistory(Timestamp(102, 0), kShardOne), ChunkHistory(Timestamp(101, 0), kShardTwo)});
+ chunkType.setOnCurrentShardSince(Timestamp(102, 0));
+ chunkType.setHistory({ChunkHistory(*chunkType.getOnCurrentShardSince(), kShardOne),
+ ChunkHistory(Timestamp(101, 0), kShardTwo)});
ChunkInfo chunkInfo(chunkType);
Chunk chunk(chunkInfo, Timestamp(100, 0));