author     Martin Neupauer <martin.neupauer@10gen.com>    2018-03-26 19:30:26 -0400
committer  Martin Neupauer <martin.neupauer@mongodb.com>  2018-04-05 12:52:02 -0400
commit     a6d486b8a6e0c81771bd771cc0237236791d635d (patch)
tree       61b49fae8cf316c033b2ac707202e50b281cc72a /src/mongo
parent     6fbc1bbfcd5ffcfb451c300a6ef523f19d5edb55 (diff)
SERVER-33523 Add timestamp support to CollectionMetadata
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp          |   4
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp                 |   4
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp            |   2
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp              |   2
-rw-r--r--  src/mongo/db/commands/mr.cpp                        |   2
-rw-r--r--  src/mongo/db/exec/update.cpp                        |   4
-rw-r--r--  src/mongo/db/op_observer_impl.cpp                   |   2
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp                |   8
-rw-r--r--  src/mongo/db/query/get_executor.cpp                 |   4
-rw-r--r--  src/mongo/db/query/stage_builder.cpp                |   2
-rw-r--r--  src/mongo/db/s/cleanup_orphaned_cmd.cpp             |   2
-rw-r--r--  src/mongo/db/s/collection_range_deleter.cpp         |   2
-rw-r--r--  src/mongo/db/s/collection_sharding_state.cpp        |  24
-rw-r--r--  src/mongo/db/s/collection_sharding_state.h          |   8
-rw-r--r--  src/mongo/db/s/collection_sharding_state_test.cpp   |   8
-rw-r--r--  src/mongo/db/s/get_shard_version_command.cpp        |   2
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp             |   4
-rw-r--r--  src/mongo/db/s/metadata_manager.cpp                 |  29
-rw-r--r--  src/mongo/db/s/metadata_manager.h                   |  15
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp    |   4
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp         |  10
-rw-r--r--  src/mongo/db/s/set_shard_version_command.cpp        |   6
-rw-r--r--  src/mongo/db/s/shard_filtering_metadata_refresh.cpp |  27
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.cpp         |   6
-rw-r--r--  src/mongo/db/s/split_chunk.cpp                      |   2
-rw-r--r--  src/mongo/db/views/view_sharding_check.cpp          |   2
-rw-r--r--  src/mongo/s/SConscript                              |   3
-rw-r--r--  src/mongo/s/catalog_cache.cpp                       |  16
-rw-r--r--  src/mongo/s/catalog_cache.h                         |   8
-rw-r--r--  src/mongo/s/metadata_filtering_test.cpp             | 273
30 files changed, 417 insertions(+), 68 deletions(-)
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index a5d26422d2a..8aa634a14c5 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -161,7 +161,7 @@ Status renameCollectionCommon(OperationContext* opCtx,
}
// Make sure the source collection is not sharded.
- if (CollectionShardingState::get(opCtx, source)->getMetadata()) {
+ if (CollectionShardingState::get(opCtx, source)->getMetadata(opCtx)) {
return {ErrorCodes::IllegalOperation, "source namespace cannot be sharded"};
}
@@ -189,7 +189,7 @@ Status renameCollectionCommon(OperationContext* opCtx,
invariant(source == target);
return Status::OK();
}
- if (CollectionShardingState::get(opCtx, target)->getMetadata()) {
+ if (CollectionShardingState::get(opCtx, target)->getMetadata(opCtx)) {
return {ErrorCodes::IllegalOperation, "cannot rename to a sharded collection"};
}
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 91ba0169b0c..616f9d3db03 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -150,7 +150,7 @@ public:
// Prevent chunks from being cleaned up during yields - this allows us to only check the
// version on initial entry into count.
- auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getMetadata();
+ auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
auto statusWithPlanExecutor =
getExecutorCount(opCtx, collection, request.getValue(), true /*explain*/);
@@ -202,7 +202,7 @@ public:
// Prevent chunks from being cleaned up during yields - this allows us to only check the
// version on initial entry into count.
- auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getMetadata();
+ auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
auto statusWithPlanExecutor =
getExecutorCount(opCtx, collection, request.getValue(), false /*explain*/);
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 1606db4e7c7..9ef02eecc0a 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -420,7 +420,7 @@ private:
const BSONObj& newIdxKey) {
invariant(opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X));
- auto metadata(CollectionShardingState::get(opCtx, nss)->getMetadata());
+ auto metadata(CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx));
if (metadata) {
ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
if (!shardKeyPattern.isUniqueIndexCompatible(newIdxKey)) {
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index 4c6ff7a0107..f76dbef2a67 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -229,7 +229,7 @@ public:
// Prevent chunks from being cleaned up during yields - this allows us to only check the
// version on initial entry into geoNear.
- auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getMetadata();
+ auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
const auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx);
const PlanExecutor::YieldPolicy yieldPolicy =
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 0cf5d3314b4..aed0a3af04e 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -1424,7 +1424,7 @@ public:
// Get metadata before we check our version, to make sure it doesn't increment in the
// meantime
AutoGetCollectionForReadCommand autoColl(opCtx, config.nss);
- return CollectionShardingState::get(opCtx, config.nss)->getMetadata();
+ return CollectionShardingState::get(opCtx, config.nss)->getMetadata(opCtx);
}();
bool shouldHaveData = false;
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index 44e4513440a..a044324197c 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -135,7 +135,7 @@ bool shouldRestartUpdateIfNoLongerMatches(const UpdateStageParams& params) {
const std::vector<std::unique_ptr<FieldRef>>* getImmutableFields(OperationContext* opCtx,
const NamespaceString& ns) {
- auto metadata = CollectionShardingState::get(opCtx, ns)->getMetadata();
+ auto metadata = CollectionShardingState::get(opCtx, ns)->getMetadata(opCtx);
if (metadata) {
const std::vector<std::unique_ptr<FieldRef>>& fields = metadata->getKeyPatternFields();
// Return shard-keys as immutable for the update system.
@@ -294,7 +294,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
args.uuid = _collection->uuid();
args.stmtId = request->getStmtId();
args.update = logObj;
- args.criteria = css->getMetadata().extractDocumentKey(newObj);
+ args.criteria = css->getMetadata(getOpCtx()).extractDocumentKey(newObj);
uassert(16980,
"Multi-update operations require all documents to have an '_id' field",
!request->isMulti() || args.criteria.hasField("_id"_sd));
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index c60c94617b3..c3cfded11b7 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -481,7 +481,7 @@ void OpObserverImpl::aboutToDelete(OperationContext* opCtx,
BSONObj const& doc) {
auto& deleteState = getDeleteState(opCtx);
auto* css = CollectionShardingState::get(opCtx, nss);
- deleteState = css->makeDeleteState(doc);
+ deleteState = css->makeDeleteState(opCtx, doc);
}
void OpObserverImpl::onDelete(OperationContext* opCtx,
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 18a28696aca..043452c3e0f 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -156,7 +156,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, collection->ns().ns())) {
auto shardFilterStage = stdx::make_unique<ShardFilterStage>(
opCtx,
- CollectionShardingState::get(opCtx, collection->ns())->getMetadata(),
+ CollectionShardingState::get(opCtx, collection->ns())->getMetadata(opCtx),
ws.get(),
stage.release());
return PlanExecutor::make(opCtx,
@@ -591,7 +591,7 @@ bool PipelineD::MongoDInterface::isSharded(OperationContext* opCtx, const Namesp
// TODO SERVER-24960: Use CollectionShardingState::collectionIsSharded() to confirm sharding
// state.
auto css = CollectionShardingState::get(opCtx, nss);
- return bool(css->getMetadata());
+ return bool(css->getMetadata(opCtx));
}
BSONObj PipelineD::MongoDInterface::insert(const boost::intrusive_ptr<ExpressionContext>& expCtx,
@@ -728,7 +728,7 @@ Status PipelineD::MongoDInterface::attachCursorSourceToPipeline(
auto css = CollectionShardingState::get(expCtx->opCtx, expCtx->ns);
uassert(4567,
str::stream() << "from collection (" << expCtx->ns.ns() << ") cannot be sharded",
- !bool(css->getMetadata()));
+ !bool(css->getMetadata(expCtx->opCtx)));
PipelineD::prepareCursorSource(autoColl->getCollection(), expCtx->ns, nullptr, pipeline);
@@ -751,7 +751,7 @@ std::vector<FieldPath> PipelineD::MongoDInterface::collectDocumentKeyFields(
auto scm = [this, opCtx, &nss]() -> ScopedCollectionMetadata {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- return CollectionShardingState::get(opCtx, nss)->getMetadata();
+ return CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
}();
if (!scm) {
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 9bc7c37454f..b1dcebfff3d 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -177,7 +177,7 @@ void fillOutPlannerParams(OperationContext* opCtx,
// If the caller wants a shard filter, make sure we're actually sharded.
if (plannerParams->options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
auto collMetadata =
- CollectionShardingState::get(opCtx, canonicalQuery->nss())->getMetadata();
+ CollectionShardingState::get(opCtx, canonicalQuery->nss())->getMetadata(opCtx);
if (collMetadata) {
plannerParams->shardKey = collMetadata->getKeyPattern();
} else {
@@ -278,7 +278,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
root = make_unique<ShardFilterStage>(
opCtx,
- CollectionShardingState::get(opCtx, canonicalQuery->nss())->getMetadata(),
+ CollectionShardingState::get(opCtx, canonicalQuery->nss())->getMetadata(opCtx),
ws,
root.release());
}
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index caf108aa019..2834498dc03 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -296,7 +296,7 @@ PlanStage* buildStages(OperationContext* opCtx,
}
return new ShardFilterStage(
opCtx,
- CollectionShardingState::get(opCtx, collection->ns())->getMetadata(),
+ CollectionShardingState::get(opCtx, collection->ns())->getMetadata(opCtx),
ws,
childStage);
}
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index bd4716401a9..1177a8dbf71 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -79,7 +79,7 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
{
AutoGetCollection autoColl(opCtx, ns, MODE_IX);
const auto css = CollectionShardingState::get(opCtx, ns);
- auto metadata = css->getMetadata();
+ auto metadata = css->getMetadata(opCtx);
if (!metadata) {
log() << "skipping orphaned data cleanup for " << ns.toString()
<< ", collection is not sharded";
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index b575538ecc8..9f90ed44e02 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -109,7 +109,7 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
auto* const css = CollectionShardingState::get(opCtx, nss);
auto* const self = forTestOnly ? forTestOnly : &css->_metadataManager->_rangesToClean;
- auto scopedCollectionMetadata = css->getMetadata();
+ auto scopedCollectionMetadata = css->getMetadata(opCtx);
if (!forTestOnly && (!collection || !scopedCollectionMetadata)) {
if (!collection) {
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index 9fd8c6c887e..5bafa1f37fd 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -125,14 +125,14 @@ public:
return *it->second;
}
- void report(BSONObjBuilder* builder) {
+ void report(OperationContext* opCtx, BSONObjBuilder* builder) {
BSONObjBuilder versionB(builder->subobjStart("versions"));
{
stdx::lock_guard<stdx::mutex> lg(_mutex);
for (auto& coll : _collections) {
- ScopedCollectionMetadata metadata = coll.second->getMetadata();
+ ScopedCollectionMetadata metadata = coll.second->getMetadata(opCtx);
if (metadata) {
versionB.appendTimestamp(coll.first, metadata->getShardVersion().toLong());
} else {
@@ -182,11 +182,14 @@ CollectionShardingState* CollectionShardingState::get(OperationContext* opCtx,
void CollectionShardingState::report(OperationContext* opCtx, BSONObjBuilder* builder) {
auto& collectionsMap = getCollectionShardingStateMap(opCtx->getServiceContext());
- collectionsMap.report(builder);
+ collectionsMap.report(opCtx, builder);
}
-ScopedCollectionMetadata CollectionShardingState::getMetadata() {
- return _metadataManager->getActiveMetadata(_metadataManager);
+ScopedCollectionMetadata CollectionShardingState::getMetadata(OperationContext* opCtx) {
+ // TODO: SERVER-34276 - find an alternative to get the atClusterTime.
+ auto atClusterTime = repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime();
+ return atClusterTime ? _metadataManager->createMetadataAt(opCtx, atClusterTime.get())
+ : _metadataManager->getActiveMetadata(_metadataManager);
}
void CollectionShardingState::refreshMetadata(OperationContext* opCtx,
@@ -261,8 +264,8 @@ void CollectionShardingState::checkShardVersionOrThrow(OperationContext* opCtx)
}
}
-bool CollectionShardingState::collectionIsSharded() {
- auto metadata = getMetadata().getMetadata();
+bool CollectionShardingState::collectionIsSharded(OperationContext* opCtx) {
+ auto metadata = getMetadata(opCtx).getMetadata();
if (metadata && (metadata->getCollVersion().isStrictlyEqualTo(ChunkVersion::UNSHARDED()))) {
return false;
}
@@ -352,8 +355,9 @@ void CollectionShardingState::onUpdateOp(OperationContext* opCtx,
}
}
-auto CollectionShardingState::makeDeleteState(BSONObj const& doc) -> DeleteState {
- return {getMetadata().extractDocumentKey(doc).getOwned(),
+auto CollectionShardingState::makeDeleteState(OperationContext* opCtx, BSONObj const& doc)
+ -> DeleteState {
+ return {getMetadata(opCtx).extractDocumentKey(doc).getOwned(),
_sourceMgr && _sourceMgr->getCloner()->isDocumentInMigratingChunk(doc)};
}
@@ -407,7 +411,7 @@ bool CollectionShardingState::_checkShardVersionOk(OperationContext* opCtx,
}
// Set this for error messaging purposes before potentially returning false.
- auto metadata = getMetadata();
+ auto metadata = getMetadata(opCtx);
*actualShardVersion = metadata ? metadata->getShardVersion() : ChunkVersion::UNSHARDED();
auto criticalSectionSignal = _critSec.getSignal(opCtx->lockState()->isWriteLocked()
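
The hunk above is the heart of the change: getMetadata() now takes the OperationContext and, when the operation carries readConcern level "snapshot" with atClusterTime, resolves metadata as of that cluster time instead of returning the active metadata. A minimal caller-side sketch (the helper keyBelongsToThisShard is hypothetical, not part of this patch):

bool keyBelongsToThisShard(OperationContext* opCtx,
                           const NamespaceString& nss,
                           const BSONObj& shardKey) {
    AutoGetCollection autoColl(opCtx, nss, MODE_IS);
    // With readConcern {level: "snapshot", atClusterTime: T} set on the opCtx
    // this resolves metadata as of T; otherwise it returns the active metadata.
    auto metadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
    // Unsharded collections have no metadata; treat every key as owned.
    return !metadata || metadata->keyBelongsToMe(shardKey);
}
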
diff --git a/src/mongo/db/s/collection_sharding_state.h b/src/mongo/db/s/collection_sharding_state.h
index 51ff7d26435..6aa2463dc9d 100644
--- a/src/mongo/db/s/collection_sharding_state.h
+++ b/src/mongo/db/s/collection_sharding_state.h
@@ -75,7 +75,7 @@ public:
bool isMigrating;
};
- DeleteState makeDeleteState(BSONObj const& doc);
+ DeleteState makeDeleteState(OperationContext* opCtx, BSONObj const& doc);
/**
* Obtains the sharding state for the specified collection. If it does not exist, it will be
@@ -93,8 +93,10 @@ public:
* Returns the chunk metadata for the collection. The metadata it represents lives as long as
* the object itself, and the collection, exist. After dropping the collection lock, the
* collection may no longer exist, but it is still safe to destroy the object.
+ * The returned metadata is tied to a specific point in time (atClusterTime), which is
+ * retrieved from the operation context (opCtx).
*/
- ScopedCollectionMetadata getMetadata();
+ ScopedCollectionMetadata getMetadata(OperationContext* opCtx);
/**
* BSON output of the pending metadata into a BSONArray
@@ -198,7 +200,7 @@ public:
* Returns whether this collection is sharded. Valid only if mongoD is primary.
* TODO SERVER-24960: This method may return a false positive until SERVER-24960 is fixed.
*/
- bool collectionIsSharded();
+ bool collectionIsSharded(OperationContext* opCtx);
/**
* Tracks deletion of any documents within the range, returning when deletion is complete.
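
Per the contract documented above, callers opt into historical filtering simply by carrying snapshot read concern on the operation, as the new tests at the bottom of this diff do. A sketch, assuming opCtx and nss are in scope:

// Pin the operation to cluster time (50, 0) via read concern; getMetadata()
// will then resolve the collection metadata as of that time.
BSONObj rc = BSON("readConcern" << BSON("level"
                                        << "snapshot"
                                        << "atClusterTime"
                                        << Timestamp(50, 0)));
uassertStatusOK(repl::ReadConcernArgs::get(opCtx).initialize(rc["readConcern"]));
auto metadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
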
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index 63db860c50d..72f01c98f53 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -156,7 +156,7 @@ TEST_F(DeleteStateTest, MakeDeleteStateUnsharded) {
// First, check that an order for deletion from an unsharded collection (where css has not been
// "refreshed" with chunk metadata) extracts just the "_id" field:
- auto deleteState = css->makeDeleteState(doc);
+ auto deleteState = css->makeDeleteState(operationContext(), doc);
ASSERT_BSONOBJ_EQ(deleteState.documentKey,
BSON("_id"
<< "hello"));
@@ -181,7 +181,7 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithoutIdInShardKey) {
<< true);
// Verify the shard key is extracted, in correct order, followed by the "_id" field.
- auto deleteState = css->makeDeleteState(doc);
+ auto deleteState = css->makeDeleteState(operationContext(), doc);
ASSERT_BSONOBJ_EQ(deleteState.documentKey,
BSON("key" << 100 << "key3"
<< "abc"
@@ -207,7 +207,7 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdInShardKey) {
<< 100);
// Verify the shard key is extracted with "_id" in the right place.
- auto deleteState = css->makeDeleteState(doc);
+ auto deleteState = css->makeDeleteState(operationContext(), doc);
ASSERT_BSONOBJ_EQ(deleteState.documentKey,
BSON("key" << 100 << "_id"
<< "hello"
@@ -231,7 +231,7 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdHashInShardKey) {
<< 100);
// Verify the shard key is extracted with "_id" in the right place, not hashed.
- auto deleteState = css->makeDeleteState(doc);
+ auto deleteState = css->makeDeleteState(operationContext(), doc);
ASSERT_BSONOBJ_EQ(deleteState.documentKey,
BSON("_id"
<< "hello"));
diff --git a/src/mongo/db/s/get_shard_version_command.cpp b/src/mongo/db/s/get_shard_version_command.cpp
index 325a7fbc5fc..4fd8a6d2261 100644
--- a/src/mongo/db/s/get_shard_version_command.cpp
+++ b/src/mongo/db/s/get_shard_version_command.cpp
@@ -109,7 +109,7 @@ public:
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
CollectionShardingState* const css = CollectionShardingState::get(opCtx, nss);
- const auto metadata = css->getMetadata();
+ const auto metadata = css->getMetadata(opCtx);
if (metadata) {
result.appendTimestamp("global", metadata->getShardVersion().toLong());
} else {
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 2405dbc5620..5c1a3fbc321 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -62,7 +62,7 @@ bool checkMetadataForSuccess(OperationContext* opCtx,
const BSONObj& maxKey) {
const auto metadataAfterMerge = [&] {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- return CollectionShardingState::get(opCtx, nss)->getMetadata();
+ return CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
}();
uassert(ErrorCodes::StaleEpoch,
@@ -107,7 +107,7 @@ Status mergeChunks(OperationContext* opCtx,
const auto metadata = [&] {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- return CollectionShardingState::get(opCtx, nss)->getMetadata();
+ return CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
}();
if (!metadata) {
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 60b8edca338..070b8b0710a 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -40,6 +40,8 @@
#include "mongo/db/range_arithmetic.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
+#include "mongo/s/catalog_cache.h"
+#include "mongo/s/grid.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/fail_point_service.h"
@@ -190,6 +192,27 @@ ScopedCollectionMetadata MetadataManager::getActiveMetadata(std::shared_ptr<Meta
return ScopedCollectionMetadata();
}
+ScopedCollectionMetadata MetadataManager::createMetadataAt(OperationContext* opCtx,
+ LogicalTime atClusterTime) {
+ auto cache = Grid::get(opCtx)->catalogCache();
+ if (!cache) {
+ return ScopedCollectionMetadata();
+ }
+
+ auto routingTable = cache->getCollectionRoutingTableHistoryNoRefresh(_nss);
+ if (!routingTable) {
+ return ScopedCollectionMetadata();
+ }
+ auto cm = std::make_shared<ChunkManager>(routingTable, atClusterTime.asTimestamp());
+
+ CollectionMetadata metadata(std::move(cm), ShardingState::get(opCtx)->getShardName());
+
+ auto metadataTracker =
+ std::make_shared<MetadataManager::CollectionMetadataTracker>(std::move(metadata));
+
+ return ScopedCollectionMetadata(std::move(metadataTracker));
+}
+
size_t MetadataManager::numberOfMetadataSnapshots() const {
stdx::lock_guard<stdx::mutex> lg(_managerLock);
if (_metadata.empty())
@@ -540,6 +563,12 @@ ScopedCollectionMetadata::ScopedCollectionMetadata(
++_metadataTracker->usageCounter;
}
+ScopedCollectionMetadata::ScopedCollectionMetadata(
+ std::shared_ptr<MetadataManager::CollectionMetadataTracker> metadataTracker)
+ : _metadataTracker(std::move(metadataTracker)) {
+ invariant(_metadataTracker);
+}
+
ScopedCollectionMetadata::ScopedCollectionMetadata(ScopedCollectionMetadata&& other) {
*this = std::move(other);
}
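
createMetadataAt() materializes untracked metadata for a requested cluster time, and returns an empty ScopedCollectionMetadata when no catalog cache or cached routing table is available. A sketch of direct use, mirroring the new tests (the timestamp and shard key values are arbitrary):

// _manager is assumed to be a std::shared_ptr<MetadataManager> for the
// collection; ask for the routing view as of cluster time (100, 0).
auto metadata = _manager->createMetadataAt(opCtx, LogicalTime(Timestamp(100, 0)));
if (metadata) {
    // Ownership is answered as of the requested time; asking about a time
    // before the recorded chunk history throws StaleChunkHistory.
    bool owned = metadata->keyBelongsToMe(BSON("_id" << 42));
}
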
diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h
index c1ececdfae7..9fe3a813dea 100644
--- a/src/mongo/db/s/metadata_manager.h
+++ b/src/mongo/db/s/metadata_manager.h
@@ -69,6 +69,12 @@ public:
ScopedCollectionMetadata getActiveMetadata(std::shared_ptr<MetadataManager> self);
/**
+ * Creates the metadata on demand for a specific point in time. The object is not tracked by
+ * the metadata manager.
+ */
+ ScopedCollectionMetadata createMetadataAt(OperationContext* opCtx, LogicalTime atClusterTime);
+
+ /**
* Returns the number of CollectionMetadata objects being maintained on behalf of running
* queries. The actual number may vary after it returns, so this is really only useful for unit
* tests.
@@ -291,6 +297,9 @@ private:
friend ScopedCollectionMetadata MetadataManager::getActiveMetadata(
std::shared_ptr<MetadataManager>);
+ friend ScopedCollectionMetadata MetadataManager::createMetadataAt(OperationContext*,
+ LogicalTime);
+
friend std::vector<ScopedCollectionMetadata> MetadataManager::overlappingMetadata(
std::shared_ptr<MetadataManager> const&, ChunkRange const&);
@@ -311,6 +320,12 @@ private:
std::shared_ptr<MetadataManager::CollectionMetadataTracker> metadataTracker);
/**
+ * Metadata not tracked by the manager - created on demand for a specific point in time.
+ */
+ ScopedCollectionMetadata(
+ std::shared_ptr<MetadataManager::CollectionMetadataTracker> metadataTracker);
+
+ /**
* Disconnect from the CollectionMetadata, possibly triggering GC of unused CollectionMetadata.
*/
void _clear();
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 81541e3daaf..1f311cec6d1 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -1112,7 +1112,7 @@ CollectionShardingState::CleanupNotification MigrationDestinationManager::_noteP
AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
auto css = CollectionShardingState::get(opCtx, nss);
- auto metadata = css->getMetadata();
+ auto metadata = css->getMetadata(opCtx);
// This can currently happen because drops aren't synchronized with in-migrations. The idea for
// checking this here is that in the future we shouldn't have this problem.
@@ -1146,7 +1146,7 @@ void MigrationDestinationManager::_forgetPending(OperationContext* opCtx,
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
auto css = CollectionShardingState::get(opCtx, nss);
- auto metadata = css->getMetadata();
+ auto metadata = css->getMetadata(opCtx);
// This can currently happen because drops aren't synchronized with in-migrations. The idea for
// checking this here is that in the future we shouldn't have this problem.
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index bb0dc85c807..253496d2832 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -163,7 +163,7 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
collectionUUID = autoColl.getCollection()->uuid().value();
}
- auto metadata = CollectionShardingState::get(opCtx, getNss())->getMetadata();
+ auto metadata = CollectionShardingState::get(opCtx, getNss())->getMetadata(opCtx);
uassert(ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "cannot move chunks for an unsharded collection",
metadata);
@@ -238,7 +238,7 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
auto css = CollectionShardingState::get(opCtx, getNss());
- const auto metadata = css->getMetadata();
+ const auto metadata = css->getMetadata(opCtx);
Status status = checkCollectionEpochMatches(metadata, _collectionEpoch);
if (!status.isOK())
return status;
@@ -293,7 +293,7 @@ Status MigrationSourceManager::enterCriticalSection(OperationContext* opCtx) {
const auto metadata = [&] {
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS);
- return CollectionShardingState::get(opCtx, _args.getNss())->getMetadata();
+ return CollectionShardingState::get(opCtx, _args.getNss())->getMetadata(opCtx);
}();
Status status = checkCollectionEpochMatches(metadata, _collectionEpoch);
@@ -390,7 +390,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
const auto metadata = [&] {
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS);
- return CollectionShardingState::get(opCtx, _args.getNss())->getMetadata();
+ return CollectionShardingState::get(opCtx, _args.getNss())->getMetadata(opCtx);
}();
Status status = checkCollectionEpochMatches(metadata, _collectionEpoch);
@@ -545,7 +545,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
auto refreshedMetadata = [&] {
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
AutoGetCollection autoColl(opCtx, getNss(), MODE_IS);
- return CollectionShardingState::get(opCtx, getNss())->getMetadata();
+ return CollectionShardingState::get(opCtx, getNss())->getMetadata(opCtx);
}();
if (!refreshedMetadata) {
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index 63818e7fe89..6dca94efffd 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -229,8 +229,8 @@ public:
auto css = CollectionShardingState::get(opCtx, nss);
const ChunkVersion collectionShardVersion =
- (css->getMetadata() ? css->getMetadata()->getShardVersion()
- : ChunkVersion::UNSHARDED());
+ (css->getMetadata(opCtx) ? css->getMetadata(opCtx)->getShardVersion()
+ : ChunkVersion::UNSHARDED());
if (requestedVersion.isWriteCompatibleWith(collectionShardVersion)) {
// MongoS and MongoD agree on what is the collection's shard version
@@ -341,7 +341,7 @@ public:
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
ChunkVersion currVersion = ChunkVersion::UNSHARDED();
- auto collMetadata = CollectionShardingState::get(opCtx, nss)->getMetadata();
+ auto collMetadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
if (collMetadata) {
currVersion = collMetadata->getShardVersion();
}
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index eed8ca38df0..e641d43578e 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -72,7 +72,7 @@ Status onShardVersionMismatch(OperationContext* opCtx,
const auto currentShardVersion = [&] {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- const auto currentMetadata = CollectionShardingState::get(opCtx, nss)->getMetadata();
+ const auto currentMetadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
if (currentMetadata) {
return currentMetadata->getShardVersion();
}
@@ -122,15 +122,14 @@ ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx,
{
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- auto css = CollectionShardingState::get(opCtx, nss);
+ auto metadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
// We already have newer version
- if (css->getMetadata() &&
- css->getMetadata()->getCollVersion().epoch() == cm->getVersion().epoch() &&
- css->getMetadata()->getCollVersion() >= cm->getVersion()) {
+ if (metadata && metadata->getCollVersion().epoch() == cm->getVersion().epoch() &&
+ metadata->getCollVersion() >= cm->getVersion()) {
LOG(1) << "Skipping refresh of metadata for " << nss << " "
- << css->getMetadata()->getCollVersion() << " with an older " << cm->getVersion();
- return css->getMetadata()->getShardVersion();
+ << metadata->getCollVersion() << " with an older " << cm->getVersion();
+ return metadata->getShardVersion();
}
}
@@ -138,14 +137,14 @@ ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx,
AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
auto css = CollectionShardingState::get(opCtx, nss);
+ auto metadata = css->getMetadata(opCtx);
// We already have newer version
- if (css->getMetadata() &&
- css->getMetadata()->getCollVersion().epoch() == cm->getVersion().epoch() &&
- css->getMetadata()->getCollVersion() >= cm->getVersion()) {
- LOG(1) << "Skipping refresh of metadata for " << nss << " "
- << css->getMetadata()->getCollVersion() << " with an older " << cm->getVersion();
- return css->getMetadata()->getShardVersion();
+ if (metadata && metadata->getCollVersion().epoch() == cm->getVersion().epoch() &&
+ metadata->getCollVersion() >= cm->getVersion()) {
+ LOG(1) << "Skipping refresh of metadata for " << nss << " " << metadata->getCollVersion()
+ << " with an older " << cm->getVersion();
+ return metadata->getShardVersion();
}
std::unique_ptr<CollectionMetadata> newCollectionMetadata =
@@ -153,7 +152,7 @@ ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx,
css->refreshMetadata(opCtx, std::move(newCollectionMetadata));
- return css->getMetadata()->getShardVersion();
+ return css->getMetadata(opCtx)->getShardVersion();
}
void onDbVersionMismatch(OperationContext* opCtx,
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index aabaeebbd77..73083621d78 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -202,7 +202,7 @@ void ShardServerOpObserver::onInserts(OperationContext* opCtx,
std::vector<InsertStatement>::const_iterator end,
bool fromMigrate) {
auto const css = CollectionShardingState::get(opCtx, nss);
- const auto metadata = css->getMetadata();
+ const auto metadata = css->getMetadata(opCtx);
for (auto it = begin; it != end; ++it) {
const auto& insertedDoc = it->doc;
@@ -228,7 +228,7 @@ void ShardServerOpObserver::onInserts(OperationContext* opCtx,
void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) {
auto const css = CollectionShardingState::get(opCtx, args.nss);
- const auto metadata = css->getMetadata();
+ const auto metadata = css->getMetadata(opCtx);
if (args.nss.ns() == NamespaceString::kShardConfigCollectionsCollectionName) {
// Notification of routing table changes are only needed on secondaries
@@ -293,7 +293,7 @@ void ShardServerOpObserver::aboutToDelete(OperationContext* opCtx,
BSONObj const& doc) {
auto& deleteState = getDeleteState(opCtx);
auto* css = CollectionShardingState::get(opCtx, nss.ns());
- deleteState = css->makeDeleteState(doc);
+ deleteState = css->makeDeleteState(opCtx, doc);
}
void ShardServerOpObserver::onDelete(OperationContext* opCtx,
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index ca9da994e73..0b7e287a774 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -98,7 +98,7 @@ bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx,
const std::vector<BSONObj>& splitKeys) {
const auto metadataAfterSplit = [&] {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- return CollectionShardingState::get(opCtx, nss)->getMetadata();
+ return CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
}();
uassert(ErrorCodes::StaleEpoch,
diff --git a/src/mongo/db/views/view_sharding_check.cpp b/src/mongo/db/views/view_sharding_check.cpp
index 9f6301dda12..7c1ec2c5f86 100644
--- a/src/mongo/db/views/view_sharding_check.cpp
+++ b/src/mongo/db/views/view_sharding_check.cpp
@@ -73,7 +73,7 @@ bool ViewShardingCheck::collectionIsSharded(OperationContext* opCtx, const Names
// and must be for a sharding check.
dassert(opCtx->lockState()->isDbLockedForMode(nss.db(), MODE_IS));
AutoGetCollection autoGetCol(opCtx, nss, MODE_IS);
- return CollectionShardingState::get(opCtx, nss)->collectionIsSharded();
+ return CollectionShardingState::get(opCtx, nss)->collectionIsSharded(opCtx);
}
} // namespace mongo
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 581c96b2cdc..57e488774b3 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -357,9 +357,12 @@ env.CppUnitTest(
'catalog_cache_refresh_test.cpp',
'chunk_manager_index_bounds_test.cpp',
'chunk_manager_query_test.cpp',
+ 'metadata_filtering_test.cpp',
'shard_key_pattern_test.cpp',
],
LIBDEPS=[
+ '$BUILD_DIR/mongo/db/s/sharding',
+ '$BUILD_DIR/mongo/db/serveronly',
'catalog_cache_test_fixture',
]
)
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index e0f08ed979a..960df99ae17 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -271,6 +271,22 @@ StatusWith<CachedCollectionRoutingInfo> CatalogCache::getShardedCollectionRoutin
return routingInfoStatus;
}
+std::shared_ptr<RoutingTableHistory> CatalogCache::getCollectionRoutingTableHistoryNoRefresh(
+ const NamespaceString& nss) {
+ stdx::lock_guard<stdx::mutex> lg(_mutex);
+
+ const auto itDb = _collectionsByDb.find(nss.db());
+ if (itDb == _collectionsByDb.end()) {
+ return nullptr;
+ }
+ const auto itColl = itDb->second.find(nss.ns());
+ if (itColl == itDb->second.end()) {
+ return nullptr;
+ }
+ return itColl->second->routingInfo;
+}
+
+
void CatalogCache::onStaleDatabaseVersion(const StringData dbName,
const DatabaseVersion& databaseVersion) {
stdx::lock_guard<stdx::mutex> lg(_mutex);
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index c1394a000d8..9903f19b3ec 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -116,6 +116,14 @@ public:
OperationContext* opCtx, const NamespaceString& nss);
/**
+ * Returns the routing table history for the collection, rather than the chunk manager (the
+ * chunk manager is part of CachedCollectionRoutingInfo). A chunk manager represents the
+ * routing state at one specific point in time, whereas the routing table history covers the
+ * collection's entire history.
+ */
+ std::shared_ptr<RoutingTableHistory> getCollectionRoutingTableHistoryNoRefresh(
+ const NamespaceString& nss);
+
+ /**
* Non-blocking method that marks the current cached database entry as needing refresh if the
* entry's databaseVersion matches 'databaseVersion'.
*
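
This accessor, combined with the ChunkManager constructor that takes a Timestamp, is how MetadataManager::createMetadataAt() materializes a historical routing view; the snippet restates the patch's own usage:

// Fetch the cached routing table history without triggering a refresh, then
// instantiate a point-in-time ChunkManager over it.
auto routingTable =
    Grid::get(opCtx)->catalogCache()->getCollectionRoutingTableHistoryNoRefresh(nss);
if (routingTable) {
    auto cm = std::make_shared<ChunkManager>(routingTable, atClusterTime.asTimestamp());
    // cm now answers routing questions as of atClusterTime.
}
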
diff --git a/src/mongo/s/metadata_filtering_test.cpp b/src/mongo/s/metadata_filtering_test.cpp
new file mode 100644
index 00000000000..c00e165f0c0
--- /dev/null
+++ b/src/mongo/s/metadata_filtering_test.cpp
@@ -0,0 +1,273 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/query/query_request.h"
+#include "mongo/db/s/metadata_manager.h"
+#include "mongo/db/s/sharding_state.h"
+#include "mongo/s/catalog/type_chunk.h"
+#include "mongo/s/catalog/type_collection.h"
+#include "mongo/s/catalog/type_database.h"
+#include "mongo/s/catalog_cache.h"
+#include "mongo/s/catalog_cache_test_fixture.h"
+
+namespace mongo {
+namespace {
+
+using executor::RemoteCommandRequest;
+using unittest::assertGet;
+
+const NamespaceString kNss("TestDB", "TestColl");
+
+class MetadataFilteringTest : public CatalogCacheTestFixture {
+protected:
+ void setUp() override {
+ CatalogCacheTestFixture::setUp();
+
+ setupNShards(2);
+
+ _manager = std::make_shared<MetadataManager>(serviceContext(), kNss, executor());
+ }
+
+ void expectGetDatabase() {
+ expectFindSendBSONObjVector(kConfigHostAndPort, [&]() {
+ DatabaseType db(kNss.db().toString(), {"0"}, true);
+ return std::vector<BSONObj>{db.toBSON()};
+ }());
+ }
+
+ void expectGetCollection(OID epoch, const ShardKeyPattern& shardKeyPattern) {
+ expectFindSendBSONObjVector(kConfigHostAndPort, [&]() {
+ CollectionType collType;
+ collType.setNs(kNss);
+ collType.setEpoch(epoch);
+ collType.setKeyPattern(shardKeyPattern.toBSON());
+ collType.setUnique(false);
+
+ return std::vector<BSONObj>{collType.toBSON()};
+ }());
+ }
+
+ // Prepares data with a history array populated:
+ // chunk1 - [min, -100)
+ // chunk2 - [100, 0)
+ // chunk3 - [0, 100)
+ // chunk4 - [100, max)
+ // and the history:
+ // time (now,75) shard0(chunk1, chunk3) shard1(chunk2, chunk4)
+ // time (75,25) shard0(chunk2, chunk4) shard1(chunk1, chunk3)
+ // time (25,0) - no history
+ void prepareTestData() {
+ const OID epoch = OID::gen();
+ const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
+
+ auto future = scheduleRoutingInfoRefresh(kNss);
+
+ expectGetDatabase();
+ expectGetCollection(epoch, shardKeyPattern);
+
+ expectGetCollection(epoch, shardKeyPattern);
+ expectFindSendBSONObjVector(kConfigHostAndPort, [&]() {
+ ChunkVersion version(1, 0, epoch);
+
+ ChunkType chunk1(kNss,
+ {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
+ version,
+ {"0"});
+ chunk1.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("0")),
+ ChunkHistory(Timestamp(25, 0), ShardId("1"))});
+ version.incMinor();
+
+ ChunkType chunk2(kNss, {BSON("_id" << -100), BSON("_id" << 0)}, version, {"1"});
+ chunk2.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("1")),
+ ChunkHistory(Timestamp(25, 0), ShardId("0"))});
+ version.incMinor();
+
+ ChunkType chunk3(kNss, {BSON("_id" << 0), BSON("_id" << 100)}, version, {"0"});
+ chunk3.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("0")),
+ ChunkHistory(Timestamp(25, 0), ShardId("1"))});
+ version.incMinor();
+
+ ChunkType chunk4(kNss,
+ {BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
+ version,
+ {"1"});
+ chunk4.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("1")),
+ ChunkHistory(Timestamp(25, 0), ShardId("0"))});
+ version.incMinor();
+
+ return std::vector<BSONObj>{chunk1.toConfigBSON(),
+ chunk2.toConfigBSON(),
+ chunk3.toConfigBSON(),
+ chunk4.toConfigBSON()};
+ }());
+
+ auto routingInfo = future.timed_get(kFutureTimeout);
+ ASSERT(routingInfo->cm());
+ auto cm = routingInfo->cm();
+
+ ASSERT_EQ(4, cm->numChunks());
+ }
+
+ std::shared_ptr<MetadataManager> _manager;
+};
+
+// Verifies that the right set of documents is visible.
+TEST_F(MetadataFilteringTest, FilterDocumentsPresent) {
+ prepareTestData();
+
+ ShardingState::get(operationContext())->setEnabledForTest(ShardId("0").toString());
+
+ auto metadata = _manager->createMetadataAt(operationContext(), LogicalTime(Timestamp(100, 0)));
+
+ ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << -500)));
+ ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << 50)));
+ ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << -50)));
+ ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << 500)));
+}
+
+// Verifies that a different set of documents is visible for a timestamp in the past.
+TEST_F(MetadataFilteringTest, FilterDocumentsPast) {
+ prepareTestData();
+
+ ShardingState::get(operationContext())->setEnabledForTest(ShardId("0").toString());
+
+ auto metadata = _manager->createMetadataAt(operationContext(), LogicalTime(Timestamp(50, 0)));
+
+ ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << -500)));
+ ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << 50)));
+ ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << -50)));
+ ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << 500)));
+}
+
+// Verifies that reading further back than the recorded chunk history yields StaleChunkHistory.
+TEST_F(MetadataFilteringTest, FilterDocumentsStale) {
+ prepareTestData();
+
+ ShardingState::get(operationContext())->setEnabledForTest(ShardId("0").toString());
+
+ auto metadata = _manager->createMetadataAt(operationContext(), LogicalTime(Timestamp(10, 0)));
+
+ ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << -500)),
+ AssertionException,
+ ErrorCodes::StaleChunkHistory);
+ ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << 50)),
+ AssertionException,
+ ErrorCodes::StaleChunkHistory);
+ ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << -50)),
+ AssertionException,
+ ErrorCodes::StaleChunkHistory);
+ ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << 500)),
+ AssertionException,
+ ErrorCodes::StaleChunkHistory);
+}
+
+// The same test as FilterDocumentsPresent but using "readConcern"
+TEST_F(MetadataFilteringTest, FilterDocumentsPresentShardingState) {
+ prepareTestData();
+
+ ShardingState::get(operationContext())->setEnabledForTest(ShardId("0").toString());
+
+ BSONObj readConcern = BSON("readConcern" << BSON("level"
+ << "snapshot"
+ << "atClusterTime"
+ << Timestamp(100, 0)));
+
+ auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
+ ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
+
+ auto css = CollectionShardingState::get(operationContext(), kNss);
+
+ auto metadata = css->getMetadata(operationContext());
+
+ ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << -500)));
+ ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << 50)));
+ ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << -50)));
+ ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << 500)));
+}
+
+// The same test as FilterDocumentsPast but using "readConcern"
+TEST_F(MetadataFilteringTest, FilterDocumentsPastShardingState) {
+ prepareTestData();
+
+ ShardingState::get(operationContext())->setEnabledForTest(ShardId("0").toString());
+
+ BSONObj readConcern = BSON("readConcern" << BSON("level"
+ << "snapshot"
+ << "atClusterTime"
+ << Timestamp(50, 0)));
+
+ auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
+ ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
+
+ auto css = CollectionShardingState::get(operationContext(), kNss);
+
+ auto metadata = css->getMetadata(operationContext());
+
+ ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << -500)));
+ ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << 50)));
+ ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << -50)));
+ ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << 500)));
+}
+
+// The same test as FilterDocumentsStale but using "readConcern"
+TEST_F(MetadataFilteringTest, FilterDocumentsStaleShardingState) {
+ prepareTestData();
+
+ ShardingState::get(operationContext())->setEnabledForTest(ShardId("0").toString());
+
+ BSONObj readConcern = BSON("readConcern" << BSON("level"
+ << "snapshot"
+ << "atClusterTime"
+ << Timestamp(10, 0)));
+
+ auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext());
+ ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"]));
+
+ auto css = CollectionShardingState::get(operationContext(), kNss);
+
+ auto metadata = css->getMetadata(operationContext());
+
+ ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << -500)),
+ AssertionException,
+ ErrorCodes::StaleChunkHistory);
+ ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << 50)),
+ AssertionException,
+ ErrorCodes::StaleChunkHistory);
+ ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << -50)),
+ AssertionException,
+ ErrorCodes::StaleChunkHistory);
+ ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << 500)),
+ AssertionException,
+ ErrorCodes::StaleChunkHistory);
+}
+
+
+} // namespace
+} // namespace mongo