author      Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2020-02-26 05:20:35 -0500
committer   Evergreen Agent <no-reply@evergreen.mongodb.com>   2020-02-26 12:15:07 +0000
commit      b41cd4b2c416e965fc3541c97cd0b51563d90c40 (patch)
tree        ae733156e4785a7e523369faa8a2a2c4607454ce
parent      9e69da32cea83fc45724213215a8de40d2635f45 (diff)
download    mongo-b41cd4b2c416e965fc3541c97cd0b51563d90c40.tar.gz
SERVER-45599 Backport of SERVER-32198: Split CollectionShardingState::getMetadata into three methods
o getCurrentMetadataIfKnown - which returns the current filtering metadata if any is available
o getMetadataForOperation - which returns the metadata which is required by the current operation, based on the OperationShardingState
o getCurrentMetadata - which returns the currently available filtering metadata (or UNSHARDED if not known)

This is in preparation for making getMetadataForOperation/getCurrentMetadata throw a StaleShardVersion exception if the metadata has not been loaded yet.

This is a partial cherry-pick from dcf7e0dd89d34f58b592f1adb3d41e5edd6e2012, adapted for the 4.0 branch.
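For reference, the reshaped accessor surface on CollectionShardingState is summarized below. This is a condensed sketch of the declarations that appear in the src/mongo/db/s/collection_sharding_state.h hunk further down, not a complete class definition:

    class CollectionShardingState {
    public:
        // Metadata the current operation must use; honours the shard version attached to
        // the OperationContext and an optional 'atClusterTime' read concern.
        ScopedCollectionMetadata getMetadataForOperation(OperationContext* opCtx);

        // Latest locally known filtering metadata; falls back to UNSHARDED when nothing
        // has been loaded yet.
        ScopedCollectionMetadata getCurrentMetadata();

        // boost::none when the filtering metadata has never been loaded; otherwise the
        // most recently refreshed metadata (or its shard version).
        boost::optional<ScopedCollectionMetadata> getCurrentMetadataIfKnown();
        boost::optional<ChunkVersion> getCurrentShardVersionIfKnown();
    };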
-rw-r--r--  jstests/sharding/cursor_valid_after_shard_stepdown.js |   3
-rw-r--r--  jstests/sharding/dump_coll_metadata.js                |  10
-rw-r--r--  jstests/sharding/features1.js                         | 124
-rw-r--r--  src/mongo/base/global_initializer_registerer.cpp      |   1
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp            |  17
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp            |  12
-rw-r--r--  src/mongo/db/commands/SConscript                      |   2
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp                   |   6
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp              |  73
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp                |   3
-rw-r--r--  src/mongo/db/commands/mr.cpp                          |  56
-rw-r--r--  src/mongo/db/exec/update.cpp                          |   7
-rw-r--r--  src/mongo/db/op_observer_impl.cpp                     |   2
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp                  |   8
-rw-r--r--  src/mongo/db/query/get_executor.cpp                   |   7
-rw-r--r--  src/mongo/db/query/stage_builder.cpp                  |  10
-rw-r--r--  src/mongo/db/s/cleanup_orphaned_cmd.cpp               |  11
-rw-r--r--  src/mongo/db/s/collection_metadata_filtering_test.cpp |  13
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.cpp        |  36
-rw-r--r--  src/mongo/db/s/collection_sharding_state.cpp          |  90
-rw-r--r--  src/mongo/db/s/collection_sharding_state.h            |  20
-rw-r--r--  src/mongo/db/s/collection_sharding_state_test.cpp     |  45
-rw-r--r--  src/mongo/db/s/get_shard_version_command.cpp          |  40
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp               | 257
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp      |  18
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp           | 102
-rw-r--r--  src/mongo/db/s/migration_source_manager.h             |   2
-rw-r--r--  src/mongo/db/s/set_shard_version_command.cpp          |  21
-rw-r--r--  src/mongo/db/s/shard_filtering_metadata_refresh.cpp   |  76
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.cpp           |   8
-rw-r--r--  src/mongo/db/s/split_chunk.cpp                        |  10
-rw-r--r--  src/mongo/s/stale_exception.cpp                       |   2
32 files changed, 570 insertions, 522 deletions
diff --git a/jstests/sharding/cursor_valid_after_shard_stepdown.js b/jstests/sharding/cursor_valid_after_shard_stepdown.js
index ea0dd72338f..01e7ee57c30 100644
--- a/jstests/sharding/cursor_valid_after_shard_stepdown.js
+++ b/jstests/sharding/cursor_valid_after_shard_stepdown.js
@@ -38,10 +38,11 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
assert.eq(0, getMoreCursor.id);
assert.eq(2, getMoreCursor.nextBatch[0].x);
+ // After stepdown, the shard version will be reset
var shardVersionAfterStepdown =
assert.commandWorked(st.rs0.getPrimary().adminCommand({getShardVersion: 'TestDB.TestColl'}))
.global;
- assert.eq(Timestamp(0, 0), shardVersionAfterStepdown);
+ assert.eq("UNKNOWN", shardVersionAfterStepdown);
st.stop();
})();
diff --git a/jstests/sharding/dump_coll_metadata.js b/jstests/sharding/dump_coll_metadata.js
index 30726f4746d..c1254985dd6 100644
--- a/jstests/sharding/dump_coll_metadata.js
+++ b/jstests/sharding/dump_coll_metadata.js
@@ -25,13 +25,13 @@
assert.eq(metadata.chunks.length, 1);
assert.eq(metadata.pending.length, 0);
- assert(metadata.chunks[0][0]._id + "" == MinKey + "");
- assert(metadata.chunks[0][1]._id + "" == MaxKey + "");
- assert(metadata.shardVersion + "" == result.global + "");
+ assert.eq(metadata.chunks[0][0]._id, MinKey);
+ assert.eq(metadata.chunks[0][1]._id, MaxKey);
+ assert.eq(metadata.shardVersion, result.global);
// Make sure a collection with no metadata still returns the metadata field
- assert(shardAdmin.runCommand({getShardVersion: coll + "xyz", fullMetadata: true}).metadata !=
- undefined);
+ assert.neq(shardAdmin.runCommand({getShardVersion: coll + "xyz", fullMetadata: true}).metadata,
+ undefined);
// Make sure we get multiple chunks after a split and refresh -- splits by themselves do not
// cause the shard to refresh.
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index 4f36d5675d6..2fabc5a514b 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -1,81 +1,63 @@
(function() {
+ 'use strict';
var s = new ShardingTest({name: "features1", shards: 2, mongos: 1});
-
- s.adminCommand({enablesharding: "test"});
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', s.shard1.shardName);
// ---- can't shard system namespaces ----
-
- assert(!s.admin.runCommand({shardcollection: "test.system.blah", key: {num: 1}}).ok,
- "shard system namespace");
+ assert.commandFailed(s.s0.adminCommand({shardcollection: "test.system.blah", key: {num: 1}}),
+ "shard system namespace");
// ---- setup test.foo -----
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
+ let db = s.s0.getDB("test");
- s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-
- db = s.getDB("test");
-
- a = s._connections[0].getDB("test");
- b = s._connections[1].getDB("test");
+ assert.commandWorked(db.foo.createIndex({y: 1}));
- db.foo.ensureIndex({y: 1});
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {num: 10}}));
+ assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
- s.adminCommand({split: "test.foo", middle: {num: 10}});
- s.adminCommand(
- {movechunk: "test.foo", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name});
+ assert.writeOK(db.foo.insert({num: 5}));
+ assert.writeOK(db.foo.save({num: 15}));
- db.foo.save({num: 5});
- db.foo.save({num: 15});
-
- s.sync();
+ let a = s.rs0.getPrimary().getDB("test");
+ let b = s.rs1.getPrimary().getDB("test");
// ---- make sure shard key index is everywhere ----
-
assert.eq(3, a.foo.getIndexKeys().length, "a index 1");
assert.eq(3, b.foo.getIndexKeys().length, "b index 1");
// ---- make sure if you add an index it goes everywhere ------
-
- db.foo.ensureIndex({x: 1});
-
- s.sync();
-
+ assert.commandWorked(db.foo.createIndex({x: 1}));
assert.eq(4, a.foo.getIndexKeys().length, "a index 2");
assert.eq(4, b.foo.getIndexKeys().length, "b index 2");
- // ---- no unique indexes ------
-
- db.foo.ensureIndex({z: 1}, true);
-
- s.sync();
-
+ // ---- no unique indexes allowed that do not include the shard key ------
+ assert.commandFailed(db.foo.createIndex({z: 1}, true));
assert.eq(4, a.foo.getIndexKeys().length, "a index 3");
assert.eq(4, b.foo.getIndexKeys().length, "b index 3");
- db.foo.ensureIndex({num: 1, bar: 1}, true);
- s.sync();
+ // ---- unique indexes that include the shard key are allowed ------
+ assert.commandWorked(db.foo.createIndex({num: 1, bar: 1}, true));
assert.eq(5, b.foo.getIndexKeys().length, "c index 3");
- // ---- can't shard thing with unique indexes
-
- db.foo2.ensureIndex({a: 1});
- s.sync();
+ // ---- can't shard thing with unique indexes ------
+ assert.commandWorked(db.foo2.createIndex({a: 1}));
printjson(db.foo2.getIndexes());
- assert(s.admin.runCommand({shardcollection: "test.foo2", key: {num: 1}}).ok,
- "shard with index");
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo2", key: {num: 1}}),
+ "shard with index");
- db.foo3.ensureIndex({a: 1}, true);
- s.sync();
+ assert.commandWorked(db.foo3.createIndex({a: 1}, true));
printjson(db.foo3.getIndexes());
- assert(!s.admin.runCommand({shardcollection: "test.foo3", key: {num: 1}}).ok,
- "shard with unique index");
+ assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo3", key: {num: 1}}),
+ "shard with unique index");
- db.foo7.ensureIndex({num: 1, a: 1}, true);
- s.sync();
+ assert.commandWorked(db.foo7.createIndex({num: 1, a: 1}, true));
printjson(db.foo7.getIndexes());
- assert(s.admin.runCommand({shardcollection: "test.foo7", key: {num: 1}}).ok,
- "shard with ok unique index");
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo7", key: {num: 1}}),
+ "shard with ok unique index");
// ----- eval -----
@@ -124,17 +106,16 @@
assert.eq(0, result.ok, "eval should not work for sharded collection in cluster");
// ---- unique shard key ----
+ assert.commandWorked(
+ s.s0.adminCommand({shardcollection: "test.foo4", key: {num: 1}, unique: true}),
+ "shard with index and unique");
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo4", middle: {num: 10}}));
+ assert.commandWorked(s.s0.adminCommand(
+ {movechunk: "test.foo4", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name}));
- assert(s.admin.runCommand({shardcollection: "test.foo4", key: {num: 1}, unique: true}).ok,
- "shard with index and unique");
- s.adminCommand({split: "test.foo4", middle: {num: 10}});
-
- s.admin.runCommand(
- {movechunk: "test.foo4", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name});
+ assert.writeOK(db.foo4.insert({num: 5}));
+ assert.writeOK(db.foo4.insert({num: 15}));
- assert.writeOK(db.foo4.save({num: 5}));
- assert.writeOK(db.foo4.save({num: 15}));
- s.sync();
assert.eq(1, a.foo4.count(), "ua1");
assert.eq(1, b.foo4.count(), "ub1");
@@ -145,16 +126,17 @@
assert(b.foo4.getIndexes()[1].unique, "ub3");
assert.eq(2, db.foo4.count(), "uc1");
- db.foo4.save({num: 7});
+ assert.writeOK(db.foo4.insert({num: 7}));
assert.eq(3, db.foo4.count(), "uc2");
- assert.writeError(db.foo4.save({num: 7}));
+ assert.writeError(db.foo4.insert({num: 7}));
assert.eq(3, db.foo4.count(), "uc4");
// --- don't let you convertToCapped ----
assert(!db.foo4.isCapped(), "ca1");
assert(!a.foo4.isCapped(), "ca2");
assert(!b.foo4.isCapped(), "ca3");
- assert(!db.foo4.convertToCapped(30000).ok, "ca30");
+
+ assert.commandFailed(db.foo4.convertToCapped(30000), "ca30");
assert(!db.foo4.isCapped(), "ca4");
assert(!a.foo4.isCapped(), "ca5");
assert(!b.foo4.isCapped(), "ca6");
@@ -166,11 +148,9 @@
assert(db.foo4a.isCapped(), "ca8");
// --- don't let you shard a capped collection
-
db.createCollection("foo5", {capped: true, size: 30000});
assert(db.foo5.isCapped(), "cb1");
- var res = s.admin.runCommand({shardcollection: "test.foo5", key: {num: 1}});
- assert(!res.ok, "shard capped: " + tojson(res));
+ assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo5", key: {num: 1}}));
// ----- group ----
@@ -220,21 +200,18 @@
});
// ---- can't shard non-empty collection without index -----
-
- assert.writeOK(db.foo8.save({a: 1}));
- assert(!s.admin.runCommand({shardcollection: "test.foo8", key: {a: 1}}).ok,
- "non-empty collection");
+ assert.writeOK(db.foo8.insert({a: 1}));
+ assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo8", key: {a: 1}}),
+ "non-empty collection");
// ---- can't shard non-empty collection with null values in shard key ----
-
- assert.writeOK(db.foo9.save({b: 1}));
- db.foo9.ensureIndex({a: 1});
- assert(!s.admin.runCommand({shardcollection: "test.foo9", key: {a: 1}}).ok,
- "entry with null value");
+ assert.writeOK(db.foo9.insert({b: 1}));
+ assert.commandWorked(db.foo9.createIndex({a: 1}));
+ assert.commandFailed(s.s0.adminCommand({shardcollection: "test.foo9", key: {a: 1}}),
+ "entry with null value");
// --- listDatabases ---
-
- r = db.getMongo().getDBs();
+ var r = db.getMongo().getDBs();
assert.eq(3, r.databases.length, tojson(r));
assert.eq("number", typeof(r.totalSize), "listDatabases 3 : " + tojson(r));
@@ -245,5 +222,4 @@
assert.commandWorked(s.s0.adminCommand({flushRouterConfig: 'TestDB.TestColl'}));
s.stop();
-
})();
diff --git a/src/mongo/base/global_initializer_registerer.cpp b/src/mongo/base/global_initializer_registerer.cpp
index 7c458470649..67b83dfd6a1 100644
--- a/src/mongo/base/global_initializer_registerer.cpp
+++ b/src/mongo/base/global_initializer_registerer.cpp
@@ -50,7 +50,6 @@ GlobalInitializerRegisterer::GlobalInitializerRegisterer(std::string name,
std::move(prerequisites),
std::move(dependents));
-
if (Status::OK() != status) {
std::cerr << "Attempt to add global initializer failed, status: " << status << std::endl;
::abort();
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index da4ecaf40ab..a9bf373f38a 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -89,6 +89,14 @@ Status createCollection(OperationContext* opCtx,
!options["capped"].trueValue() || options["size"].isNumber() ||
options.hasField("$nExtents"));
+ CollectionOptions collectionOptions;
+ {
+ Status status = collectionOptions.parse(options, kind);
+ if (!status.isOK()) {
+ return status;
+ }
+ }
+
return writeConflictRetry(opCtx, "create", nss.ns(), [&] {
Lock::DBLock dbXLock(opCtx, nss.db(), MODE_X);
const bool shardVersionCheck = true;
@@ -99,12 +107,6 @@ Status createCollection(OperationContext* opCtx,
str::stream() << "Not primary while creating collection " << nss.ns());
}
- CollectionOptions collectionOptions;
- Status status = collectionOptions.parse(options, kind);
- if (!status.isOK()) {
- return status;
- }
-
if (collectionOptions.isView()) {
// If the `system.views` collection does not exist, create it in a separate
// WriteUnitOfWork.
@@ -117,9 +119,8 @@ Status createCollection(OperationContext* opCtx,
// Create collection.
const bool createDefaultIndexes = true;
- status = Database::userCreateNS(
+ Status status = Database::userCreateNS(
opCtx, ctx.db(), nss.ns(), collectionOptions, createDefaultIndexes, idIndex);
-
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index d300df28d52..6aadfe754b4 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -171,10 +171,10 @@ Status renameCollectionCommon(OperationContext* opCtx,
// Make sure the source collection is not sharded.
{
- auto const css = CollectionShardingState::get(opCtx, source);
- if (css->getMetadata(opCtx)->isSharded()) {
+ auto* const css = CollectionShardingState::get(opCtx, source);
+ const auto metadata = css->getCurrentMetadata();
+ if (metadata->isSharded())
return {ErrorCodes::IllegalOperation, "source namespace cannot be sharded"};
- }
}
// Disallow renaming from a replicated to an unreplicated collection or vice versa.
@@ -211,10 +211,10 @@ Status renameCollectionCommon(OperationContext* opCtx,
}
{
- auto const css = CollectionShardingState::get(opCtx, target);
- if (css->getMetadata(opCtx)->isSharded()) {
+ auto* const css = CollectionShardingState::get(opCtx, target);
+ const auto metadata = css->getCurrentMetadata();
+ if (metadata->isSharded())
return {ErrorCodes::IllegalOperation, "cannot rename to a sharded collection"};
- }
}
// RenameCollectionForCommand cannot drop target by renaming.
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index b0717ce99c7..0c419beeb36 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -347,7 +347,7 @@ env.Library(
'$BUILD_DIR/mongo/db/repl/oplog',
'$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
'$BUILD_DIR/mongo/db/rw_concern_d',
- '$BUILD_DIR/mongo/db/s/sharding_catalog_manager',
+ '$BUILD_DIR/mongo/db/s/sharding_runtime_d',
'$BUILD_DIR/mongo/s/sharding_legacy_api',
'$BUILD_DIR/mongo/util/net/ssl_manager',
'core',
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index cd2891f6266..50cd6752024 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -152,7 +152,8 @@ public:
// Prevent chunks from being cleaned up during yields - this allows us to only check the
// version on initial entry into count.
- auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
+ auto rangePreserver =
+ CollectionShardingState::get(opCtx, nss)->getMetadataForOperation(opCtx);
auto statusWithPlanExecutor =
getExecutorCount(opCtx, collection, request.getValue(), true /*explain*/);
@@ -205,7 +206,8 @@ public:
// Prevent chunks from being cleaned up during yields - this allows us to only check the
// version on initial entry into count.
- auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
+ auto rangePreserver =
+ CollectionShardingState::get(opCtx, nss)->getMetadataForOperation(opCtx);
auto statusWithPlanExecutor =
getExecutorCount(opCtx, collection, request.getValue(), false /*explain*/);
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 167d5f3161b..fd71731c664 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -234,8 +234,6 @@ std::vector<BSONObj> resolveDefaultsAndRemoveExistingIndexes(OperationContext* o
return specs;
}
-} // namespace
-
/**
* { createIndexes : "bar", indexes : [ { ns : "test.bar", key : { x : 1 }, name: "x_1" } ] }
*/
@@ -243,16 +241,17 @@ class CmdCreateIndex : public ErrmsgCommandDeprecated {
public:
CmdCreateIndex() : ErrmsgCommandDeprecated(kCommandName) {}
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
+ bool supportsWriteConcern(const BSONObj& cmd) const override {
return true;
}
+
AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
return AllowedOnSecondary::kNever;
}
- virtual Status checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) const {
+ Status checkAuthForCommand(Client* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) const override {
ActionSet actions;
actions.addAction(ActionType::createIndex);
Privilege p(parseResourcePattern(dbname, cmdObj), actions);
@@ -261,26 +260,22 @@ public:
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual bool errmsgRun(OperationContext* opCtx,
- const string& dbname,
- const BSONObj& cmdObj,
- string& errmsg,
- BSONObjBuilder& result) {
+ bool errmsgRun(OperationContext* opCtx,
+ const string& dbname,
+ const BSONObj& cmdObj,
+ string& errmsg,
+ BSONObjBuilder& result) override {
const NamespaceString ns(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
+ uassertStatusOK(userAllowedWriteNS(ns));
- Status status = userAllowedWriteNS(ns);
- uassertStatusOK(status);
-
- // Disallow users from creating new indexes on config.transactions since the sessions
- // code was optimized to not update indexes.
+ // Disallow users from creating new indexes on config.transactions since the sessions code
+ // was optimized to not update indexes
uassert(ErrorCodes::IllegalOperation,
str::stream() << "not allowed to create index on " << ns.ns(),
ns != NamespaceString::kSessionTransactionsTableNamespace);
- auto specsWithStatus =
- parseAndValidateIndexSpecs(opCtx, ns, cmdObj, serverGlobalParams.featureCompatibility);
- uassertStatusOK(specsWithStatus.getStatus());
- auto specs = std::move(specsWithStatus.getValue());
+ auto specs = uassertStatusOK(
+ parseAndValidateIndexSpecs(opCtx, ns, cmdObj, serverGlobalParams.featureCompatibility));
// Do not use AutoGetOrCreateDb because we may relock the database in mode X.
Lock::DBLock dbLock(opCtx, ns.db(), MODE_IX);
@@ -335,8 +330,7 @@ public:
uasserted(ErrorCodes::CommandNotSupportedOnView, errmsg);
}
- status = userAllowedCreateNS(ns.db(), ns.coll());
- uassertStatusOK(status);
+ uassertStatusOK(userAllowedCreateNS(ns.db(), ns.coll()));
writeConflictRetry(opCtx, kCommandName, ns.ns(), [&] {
WriteUnitOfWork wunit(opCtx);
@@ -368,8 +362,7 @@ public:
for (size_t i = 0; i < specs.size(); i++) {
const BSONObj& spec = specs[i];
if (spec["unique"].trueValue()) {
- status = checkUniqueIndexConstraints(opCtx, ns, spec["key"].Obj());
- uassertStatusOK(status);
+ _checkUniqueIndexConstraints(opCtx, ns, spec["key"].Obj());
}
}
@@ -435,24 +428,24 @@ public:
}
private:
- static Status checkUniqueIndexConstraints(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& newIdxKey) {
+ static void _checkUniqueIndexConstraints(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const BSONObj& newIdxKey) {
invariant(opCtx->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X));
- auto metadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
- if (metadata->isSharded()) {
- ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
- if (!shardKeyPattern.isUniqueIndexCompatible(newIdxKey)) {
- return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "cannot create unique index over " << newIdxKey
- << " with shard key pattern "
- << shardKeyPattern.toBSON());
- }
- }
-
+ const auto metadata = CollectionShardingState::get(opCtx, nss)->getCurrentMetadata();
+ if (!metadata->isSharded())
+ return;
- return Status::OK();
+ const ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
+ uassert(ErrorCodes::CannotCreateIndex,
+ str::stream() << "cannot create unique index over " << newIdxKey
+ << " with shard key pattern "
+ << shardKeyPattern.toBSON(),
+ shardKeyPattern.isUniqueIndexCompatible(newIdxKey));
}
+
} cmdCreateIndex;
-}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index e014a13e02c..336ce83ebba 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -231,7 +231,8 @@ public:
// Prevent chunks from being cleaned up during yields - this allows us to only check the
// version on initial entry into geoNear.
- auto rangePreserver = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
+ auto rangePreserver =
+ CollectionShardingState::get(opCtx, nss)->getMetadataForOperation(opCtx);
const auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx);
const PlanExecutor::YieldPolicy yieldPolicy =
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 3637948a1e7..4a23c78b7fb 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -62,8 +62,7 @@
#include "mongo/db/query/plan_summary_stats.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/db/s/collection_metadata.h"
-#include "mongo/db/s/collection_sharding_state.h"
+#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
@@ -454,6 +453,9 @@ void State::prepTempCollection() {
<< status.code());
}
wuow.commit();
+
+ CollectionShardingRuntime::get(_opCtx, _config.incLong)
+ ->setFilteringMetadata(_opCtx, CollectionMetadata());
});
}
@@ -503,6 +505,7 @@ void State::prepTempCollection() {
CollectionOptions options = finalOptions;
options.temp = true;
+
// If a UUID for the final output collection was sent by mongos (i.e., the final output
// collection is sharded), use the UUID mongos sent when creating the temp collection.
// When the temp collection is renamed to the final output collection, the UUID will be
@@ -534,6 +537,9 @@ void State::prepTempCollection() {
_opCtx, _config.tempNamespace, uuid, *it, false);
}
wuow.commit();
+
+ CollectionShardingRuntime::get(_opCtx, _config.tempNamespace)
+ ->setFilteringMetadata(_opCtx, CollectionMetadata());
});
}
@@ -1426,12 +1432,9 @@ public:
uassert(16149, "cannot run map reduce without the js engine", getGlobalScriptEngine());
- // Prevent sharding state from changing during the MR.
- const auto collMetadata = [&] {
- // Get metadata before we check our version, to make sure it doesn't increment in the
- // meantime
+ const auto metadata = [&] {
AutoGetCollectionForReadCommand autoColl(opCtx, config.nss);
- return CollectionShardingState::get(opCtx, config.nss)->getMetadata(opCtx);
+ return CollectionShardingState::get(opCtx, config.nss)->getMetadataForOperation(opCtx);
}();
bool shouldHaveData = false;
@@ -1499,17 +1502,13 @@ public:
const ExtensionsCallbackReal extensionsCallback(opCtx, &config.nss);
const boost::intrusive_ptr<ExpressionContext> expCtx;
- auto statusWithCQ =
+ auto cq = uassertStatusOKWithContext(
CanonicalQuery::canonicalize(opCtx,
std::move(qr),
expCtx,
extensionsCallback,
- MatchExpressionParser::kAllowAllSpecialFeatures);
- if (!statusWithCQ.isOK()) {
- uasserted(17238, "Can't canonicalize query " + config.filter.toString());
- return 0;
- }
- std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ MatchExpressionParser::kAllowAllSpecialFeatures),
+ str::stream() << "Can't canonicalize query " << config.filter);
unique_ptr<PlanExecutor, PlanExecutor::Deleter> exec;
{
@@ -1536,38 +1535,37 @@ public:
Timer mt;
- // go through each doc
BSONObj o;
PlanExecutor::ExecState execState;
while (PlanExecutor::ADVANCED == (execState = exec->getNext(&o, NULL))) {
- o = o.getOwned(); // we will be accessing outside of the lock
- // check to see if this is a new object we don't own yet
- // because of a chunk migration
- if (collMetadata->isSharded()) {
- ShardKeyPattern kp(collMetadata->getKeyPattern());
- if (!collMetadata->keyBelongsToMe(kp.extractShardKeyFromDoc(o))) {
+ o = o.getOwned(); // The object will be accessed outside of collection lock
+
+ // Check to see if this is a new object we don't own yet because of a chunk
+ // migration
+ if (metadata->isSharded()) {
+ ShardKeyPattern kp(metadata->getKeyPattern());
+ if (!metadata->keyBelongsToMe(kp.extractShardKeyFromDoc(o))) {
continue;
}
}
- // do map
if (config.verbose)
mt.reset();
+
config.mapper->map(o);
+
if (config.verbose)
mapTime += mt.micros();
- // Check if the state accumulated so far needs to be written to a
- // collection. This may yield the DB lock temporarily and then
- // acquire it again.
- //
+ // Check if the state accumulated so far needs to be written to a collection.
+ // This may yield the DB lock temporarily and then acquire it again.
numInputs++;
if (numInputs % 100 == 0) {
Timer t;
- // TODO: As an optimization, we might want to do the save/restore
- // state and yield inside the reduceAndSpillInMemoryState method, so
- // it only happens if necessary.
+ // TODO: As an optimization, we might want to do the save/restore state and
+ // yield inside the reduceAndSpillInMemoryState method, so it only happens
+ // if necessary.
exec->saveState();
scopedAutoDb.reset();
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index 559673940db..eb568bfb8e4 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -135,7 +135,7 @@ bool shouldRestartUpdateIfNoLongerMatches(const UpdateStageParams& params) {
const std::vector<std::unique_ptr<FieldRef>>* getImmutableFields(OperationContext* opCtx,
const NamespaceString& ns) {
- auto metadata = CollectionShardingState::get(opCtx, ns)->getMetadata(opCtx);
+ auto metadata = CollectionShardingState::get(opCtx, ns)->getCurrentMetadata();
if (metadata->isSharded()) {
const std::vector<std::unique_ptr<FieldRef>>& fields = metadata->getKeyPatternFields();
// Return shard-keys as immutable for the update system.
@@ -288,13 +288,12 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
RecordId newRecordId;
OplogUpdateEntryArgs args;
if (!request->isExplain()) {
- invariant(_collection);
- auto* css = CollectionShardingState::get(getOpCtx(), _collection->ns());
+ auto* const css = CollectionShardingState::get(getOpCtx(), _collection->ns());
args.nss = _collection->ns();
args.uuid = _collection->uuid();
args.stmtId = request->getStmtId();
args.update = logObj;
- auto metadata = css->getMetadata(getOpCtx());
+ auto metadata = css->getCurrentMetadata();
args.criteria = metadata->extractDocumentKey(newObj);
uassert(16980,
"Multi-update operations require all documents to have an '_id' field",
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index 35cfdb77237..15abf920fa6 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -311,7 +311,7 @@ OpTimeBundle replLogApplyOps(OperationContext* opCtx,
BSONObj OpObserverImpl::getDocumentKey(OperationContext* opCtx,
NamespaceString const& nss,
BSONObj const& doc) {
- auto metadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
+ auto metadata = CollectionShardingState::get(opCtx, nss)->getMetadataForOperation(opCtx);
return metadata->extractDocumentKey(doc).getOwned();
}
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 0661430956f..9523261eb8a 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -157,7 +157,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
if (ShardingState::get(opCtx)->needCollectionMetadata(opCtx, collection->ns().ns())) {
auto shardFilterStage = stdx::make_unique<ShardFilterStage>(
opCtx,
- CollectionShardingState::get(opCtx, collection->ns())->getMetadata(opCtx),
+ CollectionShardingState::get(opCtx, collection->ns())->getMetadataForOperation(opCtx),
ws.get(),
stage.release());
return PlanExecutor::make(opCtx,
@@ -610,7 +610,7 @@ DBClientBase* PipelineD::MongoDInterface::directClient() {
bool PipelineD::MongoDInterface::isSharded(OperationContext* opCtx, const NamespaceString& nss) {
AutoGetCollectionForReadCommand autoColl(opCtx, nss);
auto const css = CollectionShardingState::get(opCtx, nss);
- return css->getMetadata(opCtx)->isSharded();
+ return css->getCurrentMetadata()->isSharded();
}
BSONObj PipelineD::MongoDInterface::insert(const boost::intrusive_ptr<ExpressionContext>& expCtx,
@@ -752,7 +752,7 @@ Status PipelineD::MongoDInterface::attachCursorSourceToPipeline(
auto css = CollectionShardingState::get(expCtx->opCtx, expCtx->ns);
uassert(4567,
str::stream() << "from collection (" << expCtx->ns.ns() << ") cannot be sharded",
- !css->getMetadata(expCtx->opCtx)->isSharded());
+ !css->getCurrentMetadata()->isSharded());
PipelineD::prepareCursorSource(autoColl->getCollection(), expCtx->ns, nullptr, pipeline);
@@ -800,7 +800,7 @@ std::pair<std::vector<FieldPath>, bool> PipelineD::MongoDInterface::collectDocum
auto scm = [opCtx, &nss]() -> ScopedCollectionMetadata {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- return CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
+ return CollectionShardingState::get(opCtx, nss)->getCurrentMetadata();
}();
// Collection is not sharded or UUID mismatch implies collection has been dropped and recreated
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 4f7f4477081..defba5f3c69 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -178,8 +178,8 @@ void fillOutPlannerParams(OperationContext* opCtx,
// If the caller wants a shard filter, make sure we're actually sharded.
if (plannerParams->options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- auto collMetadata =
- CollectionShardingState::get(opCtx, canonicalQuery->nss())->getMetadata(opCtx);
+ auto collMetadata = CollectionShardingState::get(opCtx, canonicalQuery->nss())
+ ->getMetadataForOperation(opCtx);
if (collMetadata->isSharded()) {
plannerParams->shardKey = collMetadata->getKeyPattern();
} else {
@@ -313,7 +313,8 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
root = make_unique<ShardFilterStage>(
opCtx,
- CollectionShardingState::get(opCtx, canonicalQuery->nss())->getMetadata(opCtx),
+ CollectionShardingState::get(opCtx, canonicalQuery->nss())
+ ->getMetadataForOperation(opCtx),
ws,
root.release());
}
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index c23997773dd..c1390d0d18c 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -302,11 +302,11 @@ PlanStage* buildStages(OperationContext* opCtx,
if (nullptr == childStage) {
return nullptr;
}
- return new ShardFilterStage(
- opCtx,
- CollectionShardingState::get(opCtx, collection->ns())->getMetadata(opCtx),
- ws,
- childStage);
+ return new ShardFilterStage(opCtx,
+ CollectionShardingState::get(opCtx, collection->ns())
+ ->getMetadataForOperation(opCtx),
+ ws,
+ childStage);
}
case STAGE_KEEP_MUTATIONS: {
const KeepMutationsNode* km = static_cast<const KeepMutationsNode*>(root);
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 3cf9ec3728d..8bb1c5546e0 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -81,11 +81,16 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
{
AutoGetCollection autoColl(opCtx, ns, MODE_IX);
auto* const css = CollectionShardingRuntime::get(opCtx, ns);
+ const auto optMetadata = css->getCurrentMetadataIfKnown();
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ str::stream() << "Unable to establish sharding status for collection " << ns.ns(),
+ optMetadata);
+
+ const auto& metadata = *optMetadata;
- auto metadata = css->getMetadata(opCtx);
if (!metadata->isSharded()) {
- log() << "skipping orphaned data cleanup for " << ns.toString()
- << ", collection is not sharded";
+ LOG(0) << "skipping orphaned data cleanup for " << ns.ns()
+ << ", collection is not sharded";
return CleanupResult_Done;
}
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index 7f777dfa179..46db156c36a 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/catalog_raii.h"
#include "mongo/db/s/collection_sharding_runtime.h"
+#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/shard_server_test_fixture.h"
@@ -107,6 +108,12 @@ protected:
}
_manager->setFilteringMetadata(CollectionMetadata(cm, ShardId("0")));
+
+ auto& oss = OperationShardingState::get(operationContext());
+ const auto version = cm->getVersion(ShardId("0"));
+ BSONObjBuilder builder;
+ version.appendToCommand(&builder);
+ oss.initializeClientRoutingVersions(kNss, builder.obj());
}
std::shared_ptr<MetadataManager> _manager;
@@ -134,7 +141,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInTheFuture) {
AutoGetCollection autoColl(operationContext(), kNss, MODE_IS);
auto* const css = CollectionShardingState::get(operationContext(), kNss);
- testFn(css->getMetadata(operationContext()));
+ testFn(css->getMetadataForOperation(operationContext()));
}
{
@@ -165,7 +172,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInThePast) {
AutoGetCollection autoColl(operationContext(), kNss, MODE_IS);
auto* const css = CollectionShardingState::get(operationContext(), kNss);
- testFn(css->getMetadata(operationContext()));
+ testFn(css->getMetadataForOperation(operationContext()));
}
{
@@ -204,7 +211,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsTooFarInThePastThrowsStal
AutoGetCollection autoColl(operationContext(), kNss, MODE_IS);
auto* const css = CollectionShardingState::get(operationContext(), kNss);
- testFn(css->getMetadata(operationContext()));
+ testFn(css->getMetadataForOperation(operationContext()));
}
{
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index f6f92850408..6fc514ca2a7 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -45,6 +45,30 @@ namespace {
// How long to wait before starting cleanup of an emigrated chunk range
MONGO_EXPORT_SERVER_PARAMETER(orphanCleanupDelaySecs, int, 900); // 900s = 15m
+/**
+ * Returns whether the specified namespace is used for sharding-internal purposes only and can never
+ * be marked as anything other than UNSHARDED, because the call sites which reference these
+ * collections are not prepared to handle StaleConfig errors.
+ */
+bool isNamespaceAlwaysUnsharded(const NamespaceString& nss) {
+ // There should never be a case to mark as sharded collections which are on the config server
+ if (serverGlobalParams.clusterRole != ClusterRole::ShardServer)
+ return true;
+
+ // Local and admin never have sharded collections
+ if (nss.db() == NamespaceString::kLocalDb || nss.db() == NamespaceString::kAdminDb)
+ return true;
+
+ // Certain config collections can never be sharded
+ if (nss == NamespaceString::kSessionTransactionsTableNamespace)
+ return true;
+
+ if (nss.isSystemDotProfile())
+ return true;
+
+ return false;
+}
+
} // namespace
CollectionShardingRuntime::CollectionShardingRuntime(ServiceContext* sc,
@@ -52,7 +76,11 @@ CollectionShardingRuntime::CollectionShardingRuntime(ServiceContext* sc,
executor::TaskExecutor* rangeDeleterExecutor)
: CollectionShardingState(nss),
_nss(std::move(nss)),
- _metadataManager(std::make_shared<MetadataManager>(sc, _nss, rangeDeleterExecutor)) {}
+ _metadataManager(std::make_shared<MetadataManager>(sc, _nss, rangeDeleterExecutor)) {
+ if (isNamespaceAlwaysUnsharded(_nss)) {
+ _metadataManager->setFilteringMetadata(CollectionMetadata());
+ }
+}
CollectionShardingRuntime* CollectionShardingRuntime::get(OperationContext* opCtx,
const NamespaceString& nss) {
@@ -62,13 +90,17 @@ CollectionShardingRuntime* CollectionShardingRuntime::get(OperationContext* opCt
void CollectionShardingRuntime::setFilteringMetadata(OperationContext* opCtx,
CollectionMetadata newMetadata) {
+ invariant(!newMetadata.isSharded() || !isNamespaceAlwaysUnsharded(_nss),
+ str::stream() << "Namespace " << _nss.ns() << " must never be sharded.");
invariant(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
_metadataManager->setFilteringMetadata(std::move(newMetadata));
}
void CollectionShardingRuntime::clearFilteringMetadata() {
- _metadataManager->clearFilteringMetadata();
+ if (!isNamespaceAlwaysUnsharded(_nss)) {
+ _metadataManager->clearFilteringMetadata();
+ }
}
auto CollectionShardingRuntime::beginReceive(ChunkRange const& range) -> CleanupNotification {
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index 45645383969..f65f262a029 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -34,6 +34,7 @@
#include "mongo/db/s/collection_sharding_state.h"
+#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/s/stale_exception.h"
@@ -72,8 +73,9 @@ public:
stdx::lock_guard<stdx::mutex> lg(_mutex);
for (auto& coll : _collections) {
- ScopedCollectionMetadata metadata = coll.second->getMetadata(opCtx);
- if (metadata->isSharded()) {
+ const auto optMetadata = coll.second->getCurrentMetadataIfKnown();
+ if (optMetadata) {
+ const auto& metadata = *optMetadata;
versionB.appendTimestamp(coll.first, metadata->getShardVersion().toLong());
}
}
@@ -109,6 +111,28 @@ private:
const auto kUnshardedCollection = std::make_shared<UnshardedCollection>();
+ChunkVersion getOperationReceivedVersion(OperationContext* opCtx, const NamespaceString& nss) {
+ auto& oss = OperationShardingState::get(opCtx);
+
+ // If there is a version attached to the OperationContext, use it as the received version,
+ // otherwise get the received version from the ShardedConnectionInfo
+ if (oss.hasShardVersion()) {
+ return oss.getShardVersion(nss);
+ } else if (auto const info = ShardedConnectionInfo::get(opCtx->getClient(), false)) {
+ auto connectionShardVersion = info->getVersion(nss.ns());
+
+ // For backwards compatibility with map/reduce, which can access up to 2 sharded collections
+ // in a single call, the lack of version for a namespace on the collection must be treated
+ // as UNSHARDED
+ return connectionShardVersion.value_or(ChunkVersion::UNSHARDED());
+ } else {
+ // There is no shard version information on either 'opCtx' or 'client'. This means that the
+ // operation represented by 'opCtx' is unversioned, and the shard version is always OK for
+ // unversioned operations.
+ return ChunkVersion::IGNORED();
+ }
+}
+
} // namespace
CollectionShardingState::CollectionShardingState(NamespaceString nss) : _nss(std::move(nss)) {}
@@ -127,37 +151,49 @@ void CollectionShardingState::report(OperationContext* opCtx, BSONObjBuilder* bu
collectionsMap->report(opCtx, builder);
}
-ScopedCollectionMetadata CollectionShardingState::getMetadata(OperationContext* opCtx) {
+ScopedCollectionMetadata CollectionShardingState::getMetadataForOperation(OperationContext* opCtx) {
+ const auto receivedShardVersion = getOperationReceivedVersion(opCtx, _nss);
+
+ if (ChunkVersion::isIgnoredVersion(receivedShardVersion)) {
+ return {kUnshardedCollection};
+ }
+
const auto atClusterTime = repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime();
auto optMetadata = _getMetadata(atClusterTime);
+
if (!optMetadata)
return {kUnshardedCollection};
- return std::move(*optMetadata);
+ return {std::move(*optMetadata)};
}
-void CollectionShardingState::checkShardVersionOrThrow(OperationContext* opCtx) {
- auto& oss = OperationShardingState::get(opCtx);
+ScopedCollectionMetadata CollectionShardingState::getCurrentMetadata() {
+ auto optMetadata = _getMetadata(boost::none);
- const auto receivedShardVersion = [&] {
- // If there is a version attached to the OperationContext, use it as the received version,
- // otherwise get the received version from the ShardedConnectionInfo
- if (oss.hasShardVersion()) {
- return oss.getShardVersion(_nss);
- } else if (auto const info = ShardedConnectionInfo::get(opCtx->getClient(), false)) {
- auto connectionShardVersion = info->getVersion(_nss.ns());
-
- // For backwards compatibility with map/reduce, which can access up to 2 sharded
- // collections in a single call, the lack of version for a namespace on the collection
- // must be treated as UNSHARDED
- return connectionShardVersion.value_or(ChunkVersion::UNSHARDED());
- } else {
- // There is no shard version information on either 'opCtx' or 'client'. This means that
- // the operation represented by 'opCtx' is unversioned, and the shard version is always
- // OK for unversioned operations.
- return ChunkVersion::IGNORED();
- }
- }();
+ if (!optMetadata)
+ return {kUnshardedCollection};
+
+ return {std::move(*optMetadata)};
+}
+
+boost::optional<ScopedCollectionMetadata> CollectionShardingState::getCurrentMetadataIfKnown() {
+ return _getMetadata(boost::none);
+}
+
+boost::optional<ChunkVersion> CollectionShardingState::getCurrentShardVersionIfKnown() {
+ const auto optMetadata = _getMetadata(boost::none);
+ if (!optMetadata)
+ return boost::none;
+
+ const auto& metadata = *optMetadata;
+ if (!metadata->isSharded())
+ return ChunkVersion::UNSHARDED();
+
+ return metadata->getCollVersion();
+}
+
+void CollectionShardingState::checkShardVersionOrThrow(OperationContext* opCtx) {
+ const auto receivedShardVersion = getOperationReceivedVersion(opCtx, _nss);
if (ChunkVersion::isIgnoredVersion(receivedShardVersion)) {
return;
@@ -167,8 +203,7 @@ void CollectionShardingState::checkShardVersionOrThrow(OperationContext* opCtx)
invariant(repl::ReadConcernArgs::get(opCtx).getLevel() !=
repl::ReadConcernLevel::kAvailableReadConcern);
- // Set this for error messaging purposes before potentially returning false.
- auto metadata = getMetadata(opCtx);
+ const auto metadata = getMetadataForOperation(opCtx);
const auto wantedShardVersion =
metadata->isSharded() ? metadata->getShardVersion() : ChunkVersion::UNSHARDED();
@@ -178,6 +213,7 @@ void CollectionShardingState::checkShardVersionOrThrow(OperationContext* opCtx)
if (criticalSectionSignal) {
// Set migration critical section on operation sharding state: operation will wait for the
// migration to finish before returning failure and retrying.
+ auto& oss = OperationShardingState::get(opCtx);
oss.setMigrationCriticalSectionSignal(criticalSectionSignal);
uasserted(StaleConfigInfo(_nss, receivedShardVersion, wantedShardVersion),
diff --git a/src/mongo/db/s/collection_sharding_state.h b/src/mongo/db/s/collection_sharding_state.h
index 77e11534f73..e964dbced12 100644
--- a/src/mongo/db/s/collection_sharding_state.h
+++ b/src/mongo/db/s/collection_sharding_state.h
@@ -73,14 +73,28 @@ public:
static void report(OperationContext* opCtx, BSONObjBuilder* builder);
/**
- * Returns the chunk filtering metadata for the collection. The returned object is safe to
- * access outside of collection lock.
+ * Returns the chunk filtering metadata that the current operation should be using for that
+ * collection or otherwise throws if it has not been loaded yet. If the operation does not
+ * require a specific shard version, returns an UNSHARDED metadata. The returned object is safe
+ * to access outside of collection lock.
*
* If the operation context contains an 'atClusterTime' property, the returned filtering
* metadata will be tied to a specific point in time. Otherwise it will reference the latest
* time available.
*/
- ScopedCollectionMetadata getMetadata(OperationContext* opCtx);
+ ScopedCollectionMetadata getMetadataForOperation(OperationContext* opCtx);
+ ScopedCollectionMetadata getCurrentMetadata();
+
+ /**
+ * Returns boost::none if the filtering metadata for the collection is not known yet. Otherwise
+ * returns the most recently refreshed from the config server metadata or shard version.
+ *
+ * These methods do not check for the shard version that the operation requires and should only
+ * be used for cases such as checking whether a particular config server update has taken
+ * effect.
+ */
+ boost::optional<ScopedCollectionMetadata> getCurrentMetadataIfKnown();
+ boost::optional<ChunkVersion> getCurrentShardVersionIfKnown();
/**
* Checks whether the shard version in the operation context is compatible with the shard
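
As a rough illustration of how the three accessors are meant to be used at call sites, the pattern below condenses the count_cmd.cpp, rename_collection.cpp and cleanup_orphaned_cmd.cpp hunks above (error codes and messages abbreviated):

    // Versioned read path: pin the metadata the operation was routed with.
    auto rangePreserver =
        CollectionShardingState::get(opCtx, nss)->getMetadataForOperation(opCtx);

    // Local checks that do not depend on the routed version (e.g. renameCollection).
    if (CollectionShardingState::get(opCtx, nss)->getCurrentMetadata()->isSharded()) {
        // refuse to rename a sharded source or target
    }

    // Administrative paths that must distinguish "not loaded yet" from UNSHARDED.
    const auto optMetadata =
        CollectionShardingRuntime::get(opCtx, nss)->getCurrentMetadataIfKnown();
    uassert(ErrorCodes::ConflictingOperationInProgress,
            str::stream() << "Unable to establish sharding status for collection " << nss.ns(),
            optMetadata);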
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index f5c4a3aad43..a8361bd1490 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/catalog_raii.h"
#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/op_observer_sharding_impl.h"
+#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/s/shard_server_test_fixture.h"
@@ -58,12 +59,25 @@ CollectionMetadata makeAMetadata(BSONObj const& keyPattern) {
return CollectionMetadata(std::move(cm), ShardId("this"));
}
-using DeleteStateTest = ShardServerTestFixture;
+class DeleteStateTest : public ShardServerTestFixture {
+protected:
+ void setCollectionFilteringMetadata(CollectionMetadata metadata) {
+ AutoGetCollection autoColl(operationContext(), kTestNss, MODE_X);
+ auto* const css = CollectionShardingRuntime::get(operationContext(), kTestNss);
+ css->setFilteringMetadata(operationContext(), std::move(metadata));
+
+ auto& oss = OperationShardingState::get(operationContext());
+ const auto version = metadata.getShardVersion();
+ BSONObjBuilder builder;
+ version.appendToCommand(&builder);
+ oss.initializeClientRoutingVersions(kTestNss, builder.obj());
+ }
+};
TEST_F(DeleteStateTest, MakeDeleteStateUnsharded) {
+ setCollectionFilteringMetadata(CollectionMetadata());
+
AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
- auto* const css = CollectionShardingRuntime::get(operationContext(), kTestNss);
- css->setFilteringMetadata(operationContext(), CollectionMetadata());
auto doc = BSON("key3"
<< "abc"
@@ -83,11 +97,10 @@ TEST_F(DeleteStateTest, MakeDeleteStateUnsharded) {
}
TEST_F(DeleteStateTest, MakeDeleteStateShardedWithoutIdInShardKey) {
- AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
- auto* const css = CollectionShardingRuntime::get(operationContext(), kTestNss);
-
// Push a CollectionMetadata with a shard key not including "_id"...
- css->setFilteringMetadata(operationContext(), makeAMetadata(BSON("key" << 1 << "key3" << 1)));
+ setCollectionFilteringMetadata(makeAMetadata(BSON("key" << 1 << "key3" << 1)));
+
+ AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
// The order of fields in `doc` deliberately does not match the shard key
auto doc = BSON("key3"
@@ -109,12 +122,10 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithoutIdInShardKey) {
}
TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdInShardKey) {
- AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
- auto* const css = CollectionShardingRuntime::get(operationContext(), kTestNss);
-
// Push a CollectionMetadata with a shard key that does have "_id" in the middle...
- css->setFilteringMetadata(operationContext(),
- makeAMetadata(BSON("key" << 1 << "_id" << 1 << "key2" << 1)));
+ setCollectionFilteringMetadata(makeAMetadata(BSON("key" << 1 << "_id" << 1 << "key2" << 1)));
+
+ AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
// The order of fields in `doc` deliberately does not match the shard key
auto doc = BSON("key2" << true << "key3"
@@ -134,13 +145,11 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdInShardKey) {
}
TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdHashInShardKey) {
- AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
- auto* const css = CollectionShardingRuntime::get(operationContext(), kTestNss);
-
// Push a CollectionMetadata with a shard key "_id", hashed.
- auto aMetadata = makeAMetadata(BSON("_id"
- << "hashed"));
- css->setFilteringMetadata(operationContext(), std::move(aMetadata));
+ setCollectionFilteringMetadata(makeAMetadata(BSON("_id"
+ << "hashed")));
+
+ AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
auto doc = BSON("key2" << true << "_id"
<< "hello"
diff --git a/src/mongo/db/s/get_shard_version_command.cpp b/src/mongo/db/s/get_shard_version_command.cpp
index a67a90efafb..a51ce749549 100644
--- a/src/mongo/db/s/get_shard_version_command.cpp
+++ b/src/mongo/db/s/get_shard_version_command.cpp
@@ -111,27 +111,37 @@ public:
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
auto* const css = CollectionShardingRuntime::get(opCtx, nss);
- const auto metadata = css->getMetadata(opCtx);
- if (metadata->isSharded()) {
- result.appendTimestamp("global", metadata->getShardVersion().toLong());
+ const auto optMetadata = css->getCurrentMetadataIfKnown();
+ if (!optMetadata) {
+ result.append("global", "UNKNOWN");
+
+ if (cmdObj["fullMetadata"].trueValue()) {
+ result.append("metadata", BSONObj());
+ }
} else {
- result.appendTimestamp("global", ChunkVersion::UNSHARDED().toLong());
- }
+ const auto& metadata = *optMetadata;
- if (cmdObj["fullMetadata"].trueValue()) {
- BSONObjBuilder metadataBuilder(result.subobjStart("metadata"));
if (metadata->isSharded()) {
- metadata->toBSONBasic(metadataBuilder);
+ result.appendTimestamp("global", metadata->getShardVersion().toLong());
+ } else {
+ result.appendTimestamp("global", ChunkVersion::UNSHARDED().toLong());
+ }
+
+ if (cmdObj["fullMetadata"].trueValue()) {
+ BSONObjBuilder metadataBuilder(result.subobjStart("metadata"));
+ if (metadata->isSharded()) {
+ metadata->toBSONBasic(metadataBuilder);
- BSONArrayBuilder chunksArr(metadataBuilder.subarrayStart("chunks"));
- metadata->toBSONChunks(chunksArr);
- chunksArr.doneFast();
+ BSONArrayBuilder chunksArr(metadataBuilder.subarrayStart("chunks"));
+ metadata->toBSONChunks(chunksArr);
+ chunksArr.doneFast();
- BSONArrayBuilder pendingArr(metadataBuilder.subarrayStart("pending"));
- css->toBSONPending(pendingArr);
- pendingArr.doneFast();
+ BSONArrayBuilder pendingArr(metadataBuilder.subarrayStart("pending"));
+ css->toBSONPending(pendingArr);
+ pendingArr.doneFast();
+ }
+ metadataBuilder.doneFast();
}
- metadataBuilder.doneFast();
}
return true;
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index a4d42284520..1208f5a8b52 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -51,55 +51,46 @@
#include "mongo/util/mongoutils/str.h"
namespace mongo {
-
-using std::string;
-using std::vector;
-using str::stream;
-
namespace {
bool checkMetadataForSuccess(OperationContext* opCtx,
const NamespaceString& nss,
- const BSONObj& minKey,
- const BSONObj& maxKey) {
+ const OID& epoch,
+ const ChunkRange& chunkRange) {
const auto metadataAfterMerge = [&] {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- return CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
+ return CollectionShardingState::get(opCtx, nss)->getCurrentMetadata();
}();
uassert(ErrorCodes::StaleEpoch,
- str::stream() << "Collection " << nss.ns() << " became unsharded",
- metadataAfterMerge->isSharded());
+ str::stream() << "Collection " << nss.ns() << " changed since merge start",
+ metadataAfterMerge->getCollVersion().epoch() == epoch);
ChunkType chunk;
- if (!metadataAfterMerge->getNextChunk(minKey, &chunk)) {
+ if (!metadataAfterMerge->getNextChunk(chunkRange.getMin(), &chunk)) {
return false;
}
- return chunk.getMin().woCompare(minKey) == 0 && chunk.getMax().woCompare(maxKey) == 0;
+ return chunk.getMin().woCompare(chunkRange.getMin()) == 0 &&
+ chunk.getMax().woCompare(chunkRange.getMax()) == 0;
}
-Status mergeChunks(OperationContext* opCtx,
- const NamespaceString& nss,
- const BSONObj& minKey,
- const BSONObj& maxKey,
- const OID& epoch) {
- // Get the distributed lock
- // TODO(SERVER-25086): Remove distLock acquisition from merge chunk
- const string whyMessage = stream() << "merging chunks in " << nss.ns() << " from " << minKey
- << " to " << maxKey;
-
- auto scopedDistLock = Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
- opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
-
- if (!scopedDistLock.isOK()) {
- std::string context = stream() << "could not acquire collection lock for " << nss.ns()
- << " to merge chunks in [" << redact(minKey) << ", "
- << redact(maxKey) << ")";
-
- warning() << context << causedBy(scopedDistLock.getStatus());
- return scopedDistLock.getStatus().withContext(context);
- }
+void mergeChunks(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const BSONObj& minKey,
+ const BSONObj& maxKey,
+ const OID& epoch) {
+ const std::string whyMessage = str::stream() << "merging chunks in " << nss.ns() << " from "
+ << minKey << " to " << maxKey;
+ auto scopedDistLock = uassertStatusOKWithContext(
+ Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
+ opCtx, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout),
+ str::stream() << "could not acquire collection lock for " << nss.ns()
+ << " to merge chunks in ["
+ << redact(minKey)
+ << ", "
+ << redact(maxKey)
+ << ")");
auto const shardingState = ShardingState::get(opCtx);
@@ -109,39 +100,32 @@ Status mergeChunks(OperationContext* opCtx,
const auto metadata = [&] {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- return CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
+ return CollectionShardingState::get(opCtx, nss)->getCurrentMetadata();
}();
- if (!metadata->isSharded()) {
- std::string errmsg = stream() << "could not merge chunks, collection " << nss.ns()
- << " is not sharded";
-
- warning() << errmsg;
- return {ErrorCodes::StaleEpoch, errmsg};
- }
+ uassert(ErrorCodes::StaleEpoch,
+ str::stream() << "Collection " << nss.ns() << " became unsharded",
+ metadata->isSharded());
const auto shardVersion = metadata->getShardVersion();
-
- if (epoch.isSet() && shardVersion.epoch() != epoch) {
- std::string errmsg = stream()
- << "could not merge chunks, collection " << nss.ns()
- << " has changed since merge was sent (sent epoch: " << epoch.toString()
- << ", current epoch: " << shardVersion.epoch() << ")";
-
- warning() << errmsg;
- return {ErrorCodes::StaleEpoch, errmsg};
- }
-
- if (!metadata->isValidKey(minKey) || !metadata->isValidKey(maxKey)) {
- std::string errmsg = stream() << "could not merge chunks, the range "
- << redact(ChunkRange(minKey, maxKey).toString())
- << " is not valid"
- << " for collection " << nss.ns() << " with key pattern "
- << metadata->getKeyPattern().toString();
-
- warning() << errmsg;
- return Status(ErrorCodes::IllegalOperation, errmsg);
- }
+ uassert(ErrorCodes::StaleEpoch,
+ str::stream() << "could not merge chunks, collection " << nss.ns()
+ << " has changed since merge was sent (sent epoch: "
+ << epoch.toString()
+ << ", current epoch: "
+ << shardVersion.epoch()
+ << ")",
+ !epoch.isSet() || shardVersion.epoch() == epoch);
+
+ uassert(ErrorCodes::IllegalOperation,
+ str::stream() << "could not merge chunks, the range "
+ << redact(ChunkRange(minKey, maxKey).toString())
+ << " is not valid"
+ << " for collection "
+ << nss.ns()
+ << " with key pattern "
+ << metadata->getKeyPattern().toString(),
+ metadata->isValidKey(minKey) && metadata->isValidKey(maxKey));
//
// Get merged chunk information
@@ -160,15 +144,15 @@ Status mergeChunks(OperationContext* opCtx,
chunksToMerge.push_back(itChunk);
}
- if (chunksToMerge.empty()) {
- std::string errmsg = stream()
- << "could not merge chunks, collection " << nss.ns() << " range starting at "
- << redact(minKey) << " and ending at " << redact(maxKey) << " does not belong to shard "
- << shardingState->shardId();
-
- warning() << errmsg;
- return Status(ErrorCodes::IllegalOperation, errmsg);
- }
+ uassert(ErrorCodes::IllegalOperation,
+ str::stream() << "could not merge chunks, collection " << nss.ns()
+ << " range starting at "
+ << redact(minKey)
+ << " and ending at "
+ << redact(maxKey)
+ << " does not belong to shard "
+ << shardingState->shardId(),
+ !chunksToMerge.empty());
//
// Validate the range starts and ends at chunks and has no holes, error if not valid
@@ -179,65 +163,56 @@ Status mergeChunks(OperationContext* opCtx,
// minKey is inclusive
bool minKeyInRange = rangeContains(firstDocMin, firstDocMax, minKey);
- if (!minKeyInRange) {
- std::string errmsg = stream() << "could not merge chunks, collection " << nss.ns()
- << " range starting at " << redact(minKey)
- << " does not belong to shard " << shardingState->shardId();
-
- warning() << errmsg;
- return Status(ErrorCodes::IllegalOperation, errmsg);
- }
+ uassert(ErrorCodes::IllegalOperation,
+ str::stream() << "could not merge chunks, collection " << nss.ns()
+ << " range starting at "
+ << redact(minKey)
+ << " does not belong to shard "
+ << shardingState->shardId(),
+ minKeyInRange);
BSONObj lastDocMin = chunksToMerge.back().getMin();
BSONObj lastDocMax = chunksToMerge.back().getMax();
// maxKey is exclusive
bool maxKeyInRange = lastDocMin.woCompare(maxKey) < 0 && lastDocMax.woCompare(maxKey) >= 0;
- if (!maxKeyInRange) {
- std::string errmsg = stream() << "could not merge chunks, collection " << nss.ns()
- << " range ending at " << redact(maxKey)
- << " does not belong to shard " << shardingState->shardId();
-
- warning() << errmsg;
- return Status(ErrorCodes::IllegalOperation, errmsg);
- }
+ uassert(ErrorCodes::IllegalOperation,
+ str::stream() << "could not merge chunks, collection " << nss.ns()
+ << " range ending at "
+ << redact(maxKey)
+ << " does not belong to shard "
+ << shardingState->shardId(),
+ maxKeyInRange);
bool validRangeStartKey = firstDocMin.woCompare(minKey) == 0;
bool validRangeEndKey = lastDocMax.woCompare(maxKey) == 0;
- if (!validRangeStartKey || !validRangeEndKey) {
- std::string errmsg = stream()
- << "could not merge chunks, collection " << nss.ns() << " does not contain a chunk "
- << (!validRangeStartKey ? "starting at " + redact(minKey.toString()) : "")
- << (!validRangeStartKey && !validRangeEndKey ? " or " : "")
- << (!validRangeEndKey ? "ending at " + redact(maxKey.toString()) : "");
-
- warning() << errmsg;
- return Status(ErrorCodes::IllegalOperation, errmsg);
- }
-
- if (chunksToMerge.size() == 1) {
- std::string errmsg = stream() << "could not merge chunks, collection " << nss.ns()
- << " already contains chunk for "
- << redact(ChunkRange(minKey, maxKey).toString());
-
- warning() << errmsg;
- return Status(ErrorCodes::IllegalOperation, errmsg);
- }
+ uassert(ErrorCodes::IllegalOperation,
+ str::stream() << "could not merge chunks, collection " << nss.ns()
+ << " does not contain a chunk "
+ << (!validRangeStartKey ? "starting at " + redact(minKey.toString()) : "")
+ << (!validRangeStartKey && !validRangeEndKey ? " or " : "")
+ << (!validRangeEndKey ? "ending at " + redact(maxKey.toString()) : ""),
+ validRangeStartKey && validRangeEndKey);
+ uassert(ErrorCodes::IllegalOperation,
+ str::stream() << "could not merge chunks, collection " << nss.ns()
+ << " already contains chunk for "
+ << ChunkRange(minKey, maxKey).toString(),
+ chunksToMerge.size() > 1);
// Look for hole in range
for (size_t i = 1; i < chunksToMerge.size(); ++i) {
- if (chunksToMerge[i - 1].getMax().woCompare(chunksToMerge[i].getMin()) != 0) {
- std::string errmsg = stream()
- << "could not merge chunks, collection " << nss.ns() << " has a hole in the range "
- << redact(ChunkRange(minKey, maxKey).toString()) << " at "
- << redact(ChunkRange(chunksToMerge[i - 1].getMax(), chunksToMerge[i].getMin())
- .toString());
-
- warning() << errmsg;
- return Status(ErrorCodes::IllegalOperation, errmsg);
- }
+ uassert(
+ ErrorCodes::IllegalOperation,
+ str::stream()
+ << "could not merge chunks, collection "
+ << nss.ns()
+ << " has a hole in the range "
+ << ChunkRange(minKey, maxKey).toString()
+ << " at "
+ << ChunkRange(chunksToMerge[i - 1].getMax(), chunksToMerge[i].getMin()).toString(),
+ chunksToMerge[i - 1].getMax().woCompare(chunksToMerge[i].getMin()) == 0);
}
//
@@ -251,42 +226,33 @@ Status mergeChunks(OperationContext* opCtx,
auto configCmdObj =
request.toConfigCommandBSON(ShardingCatalogClient::kMajorityWriteConcern.toBSON());
- auto cmdResponseStatus = Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommand(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- "admin",
- configCmdObj,
- Shard::RetryPolicy::kIdempotent);
+ auto cmdResponse =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommand(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ "admin",
+ configCmdObj,
+ Shard::RetryPolicy::kIdempotent));
// Refresh metadata to pick up new chunk definitions (regardless of the results returned from
// running _configsvrCommitChunkMerge).
forceShardFilteringMetadataRefresh(opCtx, nss);
- // If we failed to get any response from the config server at all, despite retries, then we
- // should just go ahead and fail the whole operation.
- if (!cmdResponseStatus.isOK()) {
- return cmdResponseStatus.getStatus();
- }
-
// If _configsvrCommitChunkMerge returned an error, look at this shard's metadata to determine
// if the merge actually did happen. This can happen if there's a network error getting the
// response from the first call to _configsvrCommitChunkMerge, but it actually succeeds, thus
// the automatic retry fails with a precondition violation, for example.
- auto commandStatus = std::move(cmdResponseStatus.getValue().commandStatus);
- auto writeConcernStatus = std::move(cmdResponseStatus.getValue().writeConcernStatus);
+ auto commandStatus = std::move(cmdResponse.commandStatus);
+ auto writeConcernStatus = std::move(cmdResponse.writeConcernStatus);
if ((!commandStatus.isOK() || !writeConcernStatus.isOK()) &&
- checkMetadataForSuccess(opCtx, nss, minKey, maxKey)) {
-
+ checkMetadataForSuccess(opCtx, nss, epoch, ChunkRange(minKey, maxKey))) {
LOG(1) << "mergeChunk [" << redact(minKey) << "," << redact(maxKey)
<< ") has already been committed.";
- } else if (!commandStatus.isOK()) {
- return commandStatus.withContext("Failed to commit chunk merge");
- } else if (!writeConcernStatus.isOK()) {
- return writeConcernStatus.withContext("Failed to commit chunk merge");
}
- return Status::OK();
+ uassertStatusOKWithContext(commandStatus, "Failed to commit chunk merge");
+ uassertStatusOKWithContext(writeConcernStatus, "Failed to commit chunk merge");
}
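
The rewritten mergeChunks above swaps the return-a-Status style for throwing assertions, so each warn-and-return block collapses into a single uassert or uassertStatusOKWithContext call. A minimal standalone sketch of that conversion, using simplified stand-ins for Status and the assertion helper rather than the real mongo/util/assert_util interfaces:

#include <iostream>
#include <stdexcept>
#include <string>

// Simplified stand-ins; the server's Status carries an error code and the real
// uassert macros record source locations, both omitted here.
struct Status {
    bool ok;
    std::string reason;
    static Status OK() { return {true, ""}; }
};

void uassertLike(bool expr, const std::string& msg) {
    if (!expr)
        throw std::runtime_error(msg);
}

// Before: every validation step returns a Status the caller must propagate.
Status validateRangeStatus(int minKey, int maxKey) {
    if (minKey >= maxKey)
        return {false, "could not merge chunks, the range is not valid"};
    return Status::OK();
}

// After: validation throws, the happy path reads straight through, and the
// command layer turns the exception into an error response.
void validateRangeThrowing(int minKey, int maxKey) {
    uassertLike(minKey < maxKey, "could not merge chunks, the range is not valid");
}

int main() {
    if (auto s = validateRangeStatus(2, 1); !s.ok)
        std::cout << "status path: " << s.reason << '\n';
    try {
        validateRangeThrowing(2, 1);
    } catch (const std::exception& ex) {
        std::cout << "throwing path: " << ex.what() << '\n';
    }
}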
class MergeChunksCommand : public ErrmsgCommandDeprecated {
@@ -325,22 +291,22 @@ public:
}
// Required
- static BSONField<string> nsField;
- static BSONField<vector<BSONObj>> boundsField;
+ static BSONField<std::string> nsField;
+ static BSONField<std::vector<BSONObj>> boundsField;
// Optional, if the merge is only valid for a particular epoch
static BSONField<OID> epochField;
bool errmsgRun(OperationContext* opCtx,
- const string& dbname,
+ const std::string& dbname,
const BSONObj& cmdObj,
- string& errmsg,
+ std::string& errmsg,
BSONObjBuilder& result) override {
uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands());
const NamespaceString nss(parseNs(dbname, cmdObj));
- vector<BSONObj> bounds;
+ std::vector<BSONObj> bounds;
if (!FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg)) {
return false;
}
@@ -374,15 +340,14 @@ public:
return false;
}
- auto mergeStatus = mergeChunks(opCtx, nss, minKey, maxKey, epoch);
- uassertStatusOK(mergeStatus);
+ mergeChunks(opCtx, nss, minKey, maxKey, epoch);
return true;
}
} mergeChunksCmd;
-BSONField<string> MergeChunksCommand::nsField("mergeChunks");
-BSONField<vector<BSONObj>> MergeChunksCommand::boundsField("bounds");
+BSONField<std::string> MergeChunksCommand::nsField("mergeChunks");
+BSONField<std::vector<BSONObj>> MergeChunksCommand::boundsField("bounds");
BSONField<OID> MergeChunksCommand::epochField("epoch");
} // namespace
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 6bf3f7718b3..49a350d096b 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -1161,14 +1161,14 @@ CollectionShardingRuntime::CleanupNotification MigrationDestinationManager::_not
AutoGetCollection autoColl(opCtx, _nss, MODE_IX, MODE_X);
auto* const css = CollectionShardingRuntime::get(opCtx, _nss);
-
- auto metadata = css->getMetadata(opCtx);
+ const auto optMetadata = css->getCurrentMetadataIfKnown();
// This can currently happen because drops aren't synchronized with in-migrations. The idea for
// checking this here is that in the future we shouldn't have this problem.
- if (!metadata->isSharded() || metadata->getCollVersion().epoch() != _epoch) {
+ if (!optMetadata || !(*optMetadata)->isSharded() ||
+ (*optMetadata)->getCollVersion().epoch() != _epoch) {
return Status{ErrorCodes::StaleShardVersion,
- str::stream() << "not noting chunk " << redact(range.toString())
+ str::stream() << "Not marking chunk " << redact(range.toString())
<< " as pending because the epoch of "
<< _nss.ns()
<< " changed"};
@@ -1192,14 +1192,14 @@ void MigrationDestinationManager::_forgetPending(OperationContext* opCtx, ChunkR
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
AutoGetCollection autoColl(opCtx, _nss, MODE_IX, MODE_X);
auto* const css = CollectionShardingRuntime::get(opCtx, _nss);
-
- auto metadata = css->getMetadata(opCtx);
+ const auto optMetadata = css->getCurrentMetadataIfKnown();
// This can currently happen because drops aren't synchronized with in-migrations. The idea for
// checking this here is that in the future we shouldn't have this problem.
- if (!metadata->isSharded() || metadata->getCollVersion().epoch() != _epoch) {
- log() << "no need to forget pending chunk " << redact(range.toString())
- << " because the epoch for " << _nss.ns() << " changed";
+ if (!optMetadata || !(*optMetadata)->isSharded() ||
+ (*optMetadata)->getCollVersion().epoch() != _epoch) {
+ LOG(0) << "No need to forget pending chunk " << redact(range.toString())
+ << " because the epoch for " << _nss.ns() << " changed";
return;
}
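
Both hunks above move from getMetadata(), which always produced some metadata object, to getCurrentMetadataIfKnown(), which produces nothing when filtering metadata has never been loaded; callers now have to tell apart "unknown", "known but unsharded", and "known and sharded". A small illustrative sketch of that three-way check, using std::optional and hypothetical types in place of the server's boost::optional<ScopedCollectionMetadata>:

#include <iostream>
#include <memory>
#include <optional>

// Illustrative stand-ins, not the server's metadata classes.
struct Metadata {
    bool sharded;
    int epoch;
    bool isSharded() const { return sharded; }
};

using OptionalMetadata = std::optional<std::shared_ptr<Metadata>>;

// Mirrors the _notePending/_forgetPending checks: bail out when the metadata is
// unknown, the collection is unsharded, or the epoch changed under us.
bool epochStillMatches(const OptionalMetadata& optMetadata, int expectedEpoch) {
    if (!optMetadata || !(*optMetadata)->isSharded() ||
        (*optMetadata)->epoch != expectedEpoch) {
        return false;
    }
    return true;
}

int main() {
    OptionalMetadata unknown;  // metadata never loaded
    OptionalMetadata unsharded{std::make_shared<Metadata>(Metadata{false, 0})};
    OptionalMetadata sharded{std::make_shared<Metadata>(Metadata{true, 42})};

    std::cout << epochStillMatches(unknown, 42) << ' '
              << epochStillMatches(unsharded, 42) << ' '
              << epochStillMatches(sharded, 42) << '\n';  // prints: 0 0 1
}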
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 57937d9c5fe..15ae251d65f 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -112,19 +112,6 @@ void refreshRecipientRoutingTable(OperationContext* opCtx,
executor->scheduleRemoteCommand(request, noOp).getStatus().ignore();
}
-Status checkCollectionEpochMatches(const ScopedCollectionMetadata& metadata, OID expectedEpoch) {
- if (metadata->isSharded() && metadata->getCollVersion().epoch() == expectedEpoch)
- return Status::OK();
-
- return {ErrorCodes::IncompatibleShardingMetadata,
- str::stream() << "The collection was dropped or recreated since the migration began. "
- << "Expected collection epoch: "
- << expectedEpoch.toString()
- << ", but found: "
- << (metadata->isSharded() ? metadata->getCollVersion().epoch().toString()
- : "unsharded collection.")};
-}
-
} // namespace
MONGO_FAIL_POINT_DEFINE(doNotRefreshRecipientAfterCommit);
@@ -170,9 +157,15 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
collectionUUID = autoColl.getCollection()->uuid().value();
}
- auto metadata = CollectionShardingState::get(opCtx, getNss())->getMetadata(opCtx);
+ auto optMetadata =
+ CollectionShardingState::get(opCtx, getNss())->getCurrentMetadataIfKnown();
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "The collection's sharding state was cleared by a concurrent operation",
+ optMetadata);
+
+ auto& metadata = *optMetadata;
uassert(ErrorCodes::IncompatibleShardingMetadata,
- str::stream() << "cannot move chunks for an unsharded collection",
+ "Cannot move chunks for an unsharded collection",
metadata->isSharded());
return std::make_tuple(std::move(metadata), std::move(collectionUUID));
@@ -241,14 +234,7 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
{
// Register for notifications from the replication subsystem
- UninterruptibleLockGuard noInterrupt(opCtx->lockState());
- AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
- auto* const css = CollectionShardingRuntime::get(opCtx, getNss());
-
- const auto metadata = css->getMetadata(opCtx);
- Status status = checkCollectionEpochMatches(metadata, _collectionEpoch);
- if (!status.isOK())
- return status;
+ const auto metadata = _getCurrentMetadataAndCheckEpoch(opCtx);
// Having the metadata manager registered on the collection sharding state is what indicates
// that a chunk on that collection is being migrated. With an active migration, write
@@ -257,6 +243,9 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
_cloneDriver = stdx::make_unique<MigrationChunkClonerSourceLegacy>(
_args, metadata->getKeyPattern(), _donorConnStr, _recipientHost);
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+ AutoGetCollection autoColl(opCtx, getNss(), MODE_IX, MODE_X);
+ auto* const css = CollectionShardingRuntime::get(opCtx, getNss());
invariant(nullptr == std::exchange(msmForCsr(css), this));
_state = kCloning;
}
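
The startClone hunk above also reorders the work so the clone driver is built before the exclusive collection lock is taken, leaving the X lock to cover only the short registration step. A rough standalone analogue of that ordering, with a plain std::mutex and invented stand-in types instead of the server's lock manager:

#include <cassert>
#include <memory>
#include <mutex>
#include <utility>

struct CloneDriver {};  // stand-in for MigrationChunkClonerSourceLegacy; construction may be costly

struct CollectionState {
    std::mutex exclusiveLock;             // stand-in for the MODE_X collection lock
    CloneDriver* activeCloner = nullptr;  // stand-in for msmForCsr(css)
};

void startClone(CollectionState& state, std::unique_ptr<CloneDriver>& cloneDriver) {
    // Expensive setup happens before any exclusive lock is acquired...
    cloneDriver = std::make_unique<CloneDriver>();

    // ...and the lock is held only while registering the new driver.
    std::lock_guard<std::mutex> lk(state.exclusiveLock);
    CloneDriver* previous = std::exchange(state.activeCloner, cloneDriver.get());
    assert(previous == nullptr);  // mirrors invariant(nullptr == std::exchange(msmForCsr(css), this))
    (void)previous;
}

int main() {
    CollectionState state;
    std::unique_ptr<CloneDriver> driver;
    startClone(state, driver);
    return state.activeCloner == driver.get() ? 0 : 1;
}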
@@ -296,19 +285,7 @@ Status MigrationSourceManager::enterCriticalSection(OperationContext* opCtx) {
_stats.totalDonorChunkCloneTimeMillis.addAndFetch(_cloneAndCommitTimer.millis());
_cloneAndCommitTimer.reset();
- {
- const auto metadata = [&] {
- UninterruptibleLockGuard noInterrupt(opCtx->lockState());
- AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS);
- return CollectionShardingState::get(opCtx, _args.getNss())->getMetadata(opCtx);
- }();
-
- Status status = checkCollectionEpochMatches(metadata, _collectionEpoch);
- if (!status.isOK())
- return status;
-
- _notifyChangeStreamsOnRecipientFirstChunk(opCtx, metadata);
- }
+ _notifyChangeStreamsOnRecipientFirstChunk(opCtx, _getCurrentMetadataAndCheckEpoch(opCtx));
// Mark the shard as running critical operation, which requires recovery on crash.
//
@@ -384,15 +361,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
BSONObjBuilder builder;
{
- const auto metadata = [&] {
- UninterruptibleLockGuard noInterrupt(opCtx->lockState());
- AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS);
- return CollectionShardingState::get(opCtx, _args.getNss())->getMetadata(opCtx);
- }();
-
- Status status = checkCollectionEpochMatches(metadata, _collectionEpoch);
- if (!status.isOK())
- return status;
+ const auto metadata = _getCurrentMetadataAndCheckEpoch(opCtx);
boost::optional<ChunkType> controlChunkType = boost::none;
ChunkType differentChunk;
@@ -535,18 +504,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
<< "' after commit failed");
}
- auto refreshedMetadata = [&] {
- UninterruptibleLockGuard noInterrupt(opCtx->lockState());
- AutoGetCollection autoColl(opCtx, getNss(), MODE_IS);
- return CollectionShardingState::get(opCtx, getNss())->getMetadata(opCtx);
- }();
-
- if (!refreshedMetadata->isSharded()) {
- return {ErrorCodes::NamespaceNotSharded,
- str::stream() << "Chunk move failed because collection '" << getNss().ns()
- << "' is no longer sharded. The migration commit error was: "
- << migrationCommitStatus.toString()};
- }
+ const auto refreshedMetadata = _getCurrentMetadataAndCheckEpoch(opCtx);
if (refreshedMetadata->keyBelongsToMe(_args.getMinKey())) {
// This condition may only happen if the migration commit has failed for any reason
@@ -571,8 +529,8 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
}
// Migration succeeded
- log() << "Migration succeeded and updated collection version to "
- << refreshedMetadata->getCollVersion();
+ LOG(0) << "Migration succeeded and updated collection version to "
+ << refreshedMetadata->getCollVersion();
MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeLeavingCriticalSection);
@@ -661,6 +619,32 @@ void MigrationSourceManager::cleanupOnError(OperationContext* opCtx) {
}
}
+ScopedCollectionMetadata MigrationSourceManager::_getCurrentMetadataAndCheckEpoch(
+ OperationContext* opCtx) {
+ auto metadata = [&] {
+ UninterruptibleLockGuard noInterrupt(opCtx->lockState());
+ AutoGetCollection autoColl(opCtx, getNss(), MODE_IS);
+ auto* const css = CollectionShardingRuntime::get(opCtx, getNss());
+
+ const auto optMetadata = css->getCurrentMetadataIfKnown();
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ "The collection's sharding state was cleared by a concurrent operation",
+ optMetadata);
+ return *optMetadata;
+ }();
+
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ str::stream() << "The collection was dropped or recreated since the migration began. "
+ << "Expected collection epoch: "
+ << _collectionEpoch.toString()
+ << ", but found: "
+ << (metadata->isSharded() ? metadata->getCollVersion().epoch().toString()
+ : "unsharded collection."),
+ metadata->isSharded() && metadata->getCollVersion().epoch() == _collectionEpoch);
+
+ return metadata;
+}
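
The new _getCurrentMetadataAndCheckEpoch helper above consolidates the lock-acquire-and-epoch-check logic that was previously repeated in startClone, enterCriticalSection, and commitChunkMetadataOnConfig. A compact sketch of the same consolidation idea, with hypothetical simplified types rather than the server's metadata classes:

#include <optional>
#include <stdexcept>
#include <string>

struct Metadata {
    bool sharded = false;
    std::string epoch;
};

// Hypothetical helper: every caller gets identical handling of the
// "unknown", "unsharded", and "epoch changed" cases instead of repeating it.
Metadata getCurrentMetadataAndCheckEpoch(const std::optional<Metadata>& current,
                                         const std::string& expectedEpoch) {
    if (!current)
        throw std::runtime_error("sharding state was cleared by a concurrent operation");
    if (!current->sharded || current->epoch != expectedEpoch)
        throw std::runtime_error("collection was dropped or recreated since the migration began");
    return *current;
}

int main() {
    std::optional<Metadata> known{Metadata{true, "epoch-1"}};
    return getCurrentMetadataAndCheckEpoch(known, "epoch-1").sharded ? 0 : 1;
}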
+
void MigrationSourceManager::_notifyChangeStreamsOnRecipientFirstChunk(
OperationContext* opCtx, const ScopedCollectionMetadata& metadata) {
// If this is not the first donation, there is nothing to be done
diff --git a/src/mongo/db/s/migration_source_manager.h b/src/mongo/db/s/migration_source_manager.h
index c053b73736e..eaaacdf0590 100644
--- a/src/mongo/db/s/migration_source_manager.h
+++ b/src/mongo/db/s/migration_source_manager.h
@@ -185,6 +185,8 @@ private:
// comments explaining the various state transitions.
enum State { kCreated, kCloning, kCloneCaughtUp, kCriticalSection, kCloneCompleted, kDone };
+ ScopedCollectionMetadata _getCurrentMetadataAndCheckEpoch(OperationContext* opCtx);
+
/**
* If this donation moves the first chunk to the recipient (i.e., the recipient didn't have any
* chunks), this function writes a no-op message to the oplog, so that change stream will notice
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index 99a7a0b3bb6..2d99d322752 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -232,11 +232,12 @@ public:
boost::optional<Lock::CollectionLock> collLock;
collLock.emplace(opCtx->lockState(), nss.ns(), MODE_IS);
- auto const css = CollectionShardingState::get(opCtx, nss);
+ auto* const css = CollectionShardingState::get(opCtx, nss);
const ChunkVersion collectionShardVersion = [&] {
- auto metadata = css->getMetadata(opCtx);
- return metadata->isSharded() ? metadata->getShardVersion()
- : ChunkVersion::UNSHARDED();
+ auto optMetadata = css->getCurrentMetadataIfKnown();
+ return (optMetadata && (*optMetadata)->isSharded())
+ ? (*optMetadata)->getShardVersion()
+ : ChunkVersion::UNSHARDED();
}();
if (requestedVersion.isWriteCompatibleWith(collectionShardVersion)) {
@@ -350,11 +351,13 @@ public:
{
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- ChunkVersion currVersion = ChunkVersion::UNSHARDED();
- auto metadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
- if (metadata->isSharded()) {
- currVersion = metadata->getShardVersion();
- }
+ const ChunkVersion currVersion = [&] {
+ auto* const css = CollectionShardingState::get(opCtx, nss);
+ auto optMetadata = css->getCurrentMetadataIfKnown();
+ return (optMetadata && (*optMetadata)->isSharded())
+ ? (*optMetadata)->getShardVersion()
+ : ChunkVersion::UNSHARDED();
+ }();
if (!status.isOK()) {
// The reload itself was interrupted or confused here
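
Both setShardVersion hunks replace a default-constructed, later-mutated ChunkVersion with a const value produced by an immediately invoked lambda, so the version cannot be modified after initialization. A tiny sketch of that idiom with invented stand-in types:

#include <iostream>
#include <optional>

struct ChunkVersion {
    int majorVersion = 0;
    static ChunkVersion UNSHARDED() { return {0}; }
};

struct FakeShardingState {
    std::optional<ChunkVersion> known;  // stand-in for getCurrentMetadataIfKnown()
};

int main() {
    FakeShardingState css{ChunkVersion{7}};

    // Immediately invoked lambda: currVersion is const and fully initialized in
    // one expression, instead of being default-constructed and patched up later.
    const ChunkVersion currVersion = [&] {
        return css.known ? *css.known : ChunkVersion::UNSHARDED();
    }();

    std::cout << currVersion.majorVersion << '\n';  // 7
}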
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index 1fb93a6213f..fbcc31a0557 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -74,19 +74,16 @@ void onShardVersionMismatch(OperationContext* opCtx,
const auto currentShardVersion = [&] {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- const auto currentMetadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
- if (currentMetadata->isSharded()) {
- return currentMetadata->getShardVersion();
- }
-
- return ChunkVersion::UNSHARDED();
+ return CollectionShardingState::get(opCtx, nss)->getCurrentShardVersionIfKnown();
}();
- if (currentShardVersion.epoch() == shardVersionReceived.epoch() &&
- currentShardVersion.majorVersion() >= shardVersionReceived.majorVersion()) {
- // Don't need to remotely reload if we're in the same epoch and the requested version is
- // smaller than the one we know about. This means that the remote side is behind.
- return;
+ if (currentShardVersion) {
+ if (currentShardVersion->epoch() == shardVersionReceived.epoch() &&
+ currentShardVersion->majorVersion() >= shardVersionReceived.majorVersion()) {
+            // Don't need to remotely reload if we're in the same epoch and the requested version
+            // is no newer than the one we know about. This means that the remote side is behind.
+ return;
+ }
}
forceShardFilteringMetadataRefresh(opCtx, nss, forceRefreshFromThisThread);
@@ -136,58 +133,69 @@ ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx,
invariant(!opCtx->lockState()->isLocked());
invariant(!opCtx->getClient()->isInDirectClient());
- auto const shardingState = ShardingState::get(opCtx);
+ auto* const shardingState = ShardingState::get(opCtx);
invariant(shardingState->canAcceptShardedCommands());
- const auto routingInfo =
+ auto routingInfo =
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh(
opCtx, nss, forceRefreshFromThisThread));
- const auto cm = routingInfo.cm();
+ auto cm = routingInfo.cm();
if (!cm) {
// No chunk manager, so unsharded.
// Exclusive collection lock needed since we're now changing the metadata
AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
-
- auto* const css = CollectionShardingRuntime::get(opCtx, nss);
- css->setFilteringMetadata(opCtx, CollectionMetadata());
+ CollectionShardingRuntime::get(opCtx, nss)
+ ->setFilteringMetadata(opCtx, CollectionMetadata());
return ChunkVersion::UNSHARDED();
}
+ // Optimistic check with only IS lock in order to avoid threads piling up on the collection X
+ // lock below
{
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- auto metadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
+ auto optMetadata = CollectionShardingState::get(opCtx, nss)->getCurrentMetadataIfKnown();
// We already have newer version
- if (metadata->isSharded() &&
- metadata->getCollVersion().epoch() == cm->getVersion().epoch() &&
- metadata->getCollVersion() >= cm->getVersion()) {
- LOG(1) << "Skipping refresh of metadata for " << nss << " "
- << metadata->getCollVersion() << " with an older " << cm->getVersion();
- return metadata->getShardVersion();
+ if (optMetadata) {
+ const auto& metadata = *optMetadata;
+ if (metadata->isSharded() &&
+ metadata->getCollVersion().epoch() == cm->getVersion().epoch() &&
+ metadata->getCollVersion() >= cm->getVersion()) {
+ LOG(1) << "Skipping refresh of metadata for " << nss << " "
+ << metadata->getCollVersion() << " with an older " << cm->getVersion();
+ return metadata->getShardVersion();
+ }
}
}
// Exclusive collection lock needed since we're now changing the metadata
AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
-
auto* const css = CollectionShardingRuntime::get(opCtx, nss);
- auto metadata = css->getMetadata(opCtx);
+ {
+ auto optMetadata = CollectionShardingState::get(opCtx, nss)->getCurrentMetadataIfKnown();
- // We already have newer version
- if (metadata->isSharded() && metadata->getCollVersion().epoch() == cm->getVersion().epoch() &&
- metadata->getCollVersion() >= cm->getVersion()) {
- LOG(1) << "Skipping refresh of metadata for " << nss << " " << metadata->getCollVersion()
- << " with an older " << cm->getVersion();
- return metadata->getShardVersion();
+ // We already have newer version
+ if (optMetadata) {
+ const auto& metadata = *optMetadata;
+ if (metadata->isSharded() &&
+ metadata->getCollVersion().epoch() == cm->getVersion().epoch() &&
+ metadata->getCollVersion() >= cm->getVersion()) {
+ LOG(1) << "Skipping refresh of metadata for " << nss << " "
+ << metadata->getCollVersion() << " with an older " << cm->getVersion();
+ return metadata->getShardVersion();
+ }
+ }
}
- css->setFilteringMetadata(opCtx, CollectionMetadata(cm, shardingState->shardId()));
+ CollectionMetadata metadata(std::move(cm), shardingState->shardId());
+ const auto newShardVersion = metadata.getShardVersion();
- return css->getMetadata(opCtx)->getShardVersion();
+ css->setFilteringMetadata(opCtx, std::move(metadata));
+ return newShardVersion;
}
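
The refactored forceShardFilteringMetadataRefresh now performs the staleness check twice: optimistically under the IS lock (per the comment above, to keep threads from piling up on the collection X lock) and again once the exclusive lock is held, since another thread may have installed newer metadata in between. A rough standalone analogue using std::shared_mutex; the server's lock manager modes are only loosely comparable to shared/exclusive mutexes:

#include <iostream>
#include <mutex>
#include <optional>
#include <shared_mutex>

struct VersionCache {
    std::shared_mutex lock;      // IS ~ shared, X ~ exclusive (rough analogy)
    std::optional<int> version;  // stand-in for the cached collection version
};

int refreshIfStale(VersionCache& cache, int refreshedVersion) {
    {
        // Optimistic check under the shared lock: avoid piling writers up on the
        // exclusive lock when the cached version is already new enough.
        std::shared_lock<std::shared_mutex> shared(cache.lock);
        if (cache.version && *cache.version >= refreshedVersion)
            return *cache.version;
    }

    std::unique_lock<std::shared_mutex> exclusive(cache.lock);
    // Re-check: another thread may have installed newer metadata while we waited.
    if (cache.version && *cache.version >= refreshedVersion)
        return *cache.version;

    cache.version = refreshedVersion;
    return refreshedVersion;
}

int main() {
    VersionCache cache;
    std::cout << refreshIfStale(cache, 3) << ' ' << refreshIfStale(cache, 2) << '\n';  // 3 3
}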
Status onDbVersionMismatchNoExcept(
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index 622225a9977..3078fd59396 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -206,8 +206,8 @@ void ShardServerOpObserver::onInserts(OperationContext* opCtx,
std::vector<InsertStatement>::const_iterator begin,
std::vector<InsertStatement>::const_iterator end,
bool fromMigrate) {
- auto const css = CollectionShardingState::get(opCtx, nss);
- const auto metadata = css->getMetadata(opCtx);
+ auto* const css = CollectionShardingState::get(opCtx, nss);
+ const auto metadata = css->getMetadataForOperation(opCtx);
for (auto it = begin; it != end; ++it) {
const auto& insertedDoc = it->doc;
@@ -232,8 +232,8 @@ void ShardServerOpObserver::onInserts(OperationContext* opCtx,
}
void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) {
- auto const css = CollectionShardingState::get(opCtx, args.nss);
- const auto metadata = css->getMetadata(opCtx);
+ auto* const css = CollectionShardingState::get(opCtx, args.nss);
+ const auto metadata = css->getMetadataForOperation(opCtx);
if (args.nss == NamespaceString::kShardConfigCollectionsNamespace) {
// Notification of routing table changes are only needed on secondaries
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index f82f8f56bf2..d3c0a6908db 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -96,16 +96,17 @@ bool checkIfSingleDoc(OperationContext* opCtx,
*/
bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx,
const NamespaceString& nss,
+ const OID& epoch,
const ChunkRange& chunkRange,
const std::vector<BSONObj>& splitKeys) {
const auto metadataAfterSplit = [&] {
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
- return CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx);
+ return CollectionShardingState::get(opCtx, nss)->getCurrentMetadata();
}();
uassert(ErrorCodes::StaleEpoch,
- str::stream() << "Collection " << nss.ns() << " became unsharded",
- metadataAfterSplit->isSharded());
+ str::stream() << "Collection " << nss.ns() << " changed since split start",
+ metadataAfterSplit->getCollVersion().epoch() == epoch);
auto newChunkBounds(splitKeys);
auto startKey = chunkRange.getMin();
@@ -208,7 +209,8 @@ StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
if (!commandStatus.isOK() || !writeConcernStatus.isOK()) {
forceShardFilteringMetadataRefresh(opCtx, nss);
- if (checkMetadataForSuccessfulSplitChunk(opCtx, nss, chunkRange, splitKeys)) {
+ if (checkMetadataForSuccessfulSplitChunk(
+ opCtx, nss, expectedCollectionEpoch, chunkRange, splitKeys)) {
// Split was committed.
} else if (!commandStatus.isOK()) {
return commandStatus;
diff --git a/src/mongo/s/stale_exception.cpp b/src/mongo/s/stale_exception.cpp
index 67e8c7b8ba8..42fc136e1b4 100644
--- a/src/mongo/s/stale_exception.cpp
+++ b/src/mongo/s/stale_exception.cpp
@@ -44,7 +44,7 @@ MONGO_INIT_REGISTER_ERROR_EXTRA_INFO(StaleDbRoutingVersion);
boost::optional<ChunkVersion> extractOptionalVersion(const BSONObj& obj, StringData field) {
auto swChunkVersion = ChunkVersion::parseLegacyWithField(obj, field);
if (swChunkVersion == ErrorCodes::NoSuchKey)
- return ChunkVersion::UNSHARDED();
+ return boost::none;
return uassertStatusOK(std::move(swChunkVersion));
}
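
With this change a missing legacy version field parses to boost::none instead of silently defaulting to ChunkVersion::UNSHARDED(), so callers can distinguish "version not sent" from "explicitly unsharded". A loose illustration of that distinction using std::optional and a hypothetical map-based stand-in for the BSON parsing:

#include <iostream>
#include <map>
#include <optional>
#include <string>

// Hypothetical stand-in for parsing an optional version field out of a BSON-like object.
std::optional<int> extractOptionalVersion(const std::map<std::string, int>& obj,
                                          const std::string& field) {
    auto it = obj.find(field);
    if (it == obj.end())
        return std::nullopt;  // field absent: "unknown", not "unsharded"
    return it->second;
}

int main() {
    std::map<std::string, int> withField{{"vWanted", 5}};
    std::map<std::string, int> withoutField;

    auto a = extractOptionalVersion(withField, "vWanted");
    auto b = extractOptionalVersion(withoutField, "vWanted");
    std::cout << (a ? std::to_string(*a) : "none") << ' '
              << (b ? std::to_string(*b) : "none") << '\n';  // 5 none
}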