author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2022-04-04 11:58:21 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2022-04-04 12:38:14 +0000
commit     8798813a699db794ca86e12d518b40dc19eb92ee (patch)
tree       1bfd032aa0e1b5fe07cfe25cd66577ff9873902f
parent     93b609f94dd58452ba28f277a6739b7740b9980a (diff)
download   mongo-8798813a699db794ca86e12d518b40dc19eb92ee.tar.gz
SERVER-65160 Mark StaleShardVersion as obsolete
-rw-r--r--  jstests/concurrency/fsm_workloads/collection_uuid.js                     2
-rw-r--r--  jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js   2
-rw-r--r--  jstests/sharding/query/agg_shard_targeting.js                             2
-rw-r--r--  src/mongo/base/error_codes.yml                                            7
-rw-r--r--  src/mongo/db/ops/write_ops.cpp                                            5
-rw-r--r--  src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp                   28
-rw-r--r--  src/mongo/db/pipeline/sharded_union_test.cpp                             34
-rw-r--r--  src/mongo/db/s/collection_metadata.cpp                                   22
-rw-r--r--  src/mongo/db/s/collection_metadata.h                                      6
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp                               2
-rw-r--r--  src/mongo/s/append_raw_responses_test.cpp                                13
-rw-r--r--  src/mongo/s/stale_exception.cpp                                          40
-rw-r--r--  src/mongo/s/stale_exception.h                                            41
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.cpp                                5
-rw-r--r--  src/mongo/s/write_ops/batch_write_op_test.cpp                            10
-rw-r--r--  src/mongo/s/write_ops/batched_command_response_test.cpp                   4
-rw-r--r--  src/mongo/s/write_ops/write_op.cpp                                        4
-rw-r--r--  src/mongo/s/write_ops/write_op_test.cpp                                  19
18 files changed, 140 insertions, 106 deletions
diff --git a/jstests/concurrency/fsm_workloads/collection_uuid.js b/jstests/concurrency/fsm_workloads/collection_uuid.js
index 8885d194260..f5367697cf7 100644
--- a/jstests/concurrency/fsm_workloads/collection_uuid.js
+++ b/jstests/concurrency/fsm_workloads/collection_uuid.js
@@ -51,7 +51,7 @@ const runCommandInLoop = function(
ErrorCodes.ConflictingOperationInProgress,
ErrorCodes.BackgroundOperationInProgressForNamespace,
ErrorCodes.ReshardCollectionInProgress,
- ErrorCodes.StaleShardVersion,
+ ErrorCodes.OBSOLETE_StaleShardVersion,
ErrorCodes.QueryPlanKilled,
];
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js
index ffe7ef756a2..f769450a6c7 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js
@@ -79,7 +79,7 @@ var $config = extendWorkload($config, function($config, $super) {
// filter out those errors.
let skippableErrors = [
ErrorCodes.StaleConfig,
- ErrorCodes.StaleShardVersion,
+ ErrorCodes.OBSOLETE_StaleShardVersion,
ErrorCodes.WriteConflict,
ErrorCodes.LockTimeout,
ErrorCodes.PreparedTransactionInProgress,
diff --git a/jstests/sharding/query/agg_shard_targeting.js b/jstests/sharding/query/agg_shard_targeting.js
index e624f701a07..334127150b1 100644
--- a/jstests/sharding/query/agg_shard_targeting.js
+++ b/jstests/sharding/query/agg_shard_targeting.js
@@ -87,7 +87,7 @@ assert.commandWorked(mongosColl.insert({_id: 50}));
assert.commandWorked(mongosColl.insert({_id: 150}));
const shardExceptions =
- [ErrorCodes.StaleConfig, ErrorCodes.StaleShardVersion, ErrorCodes.StaleEpoch];
+ [ErrorCodes.StaleConfig, ErrorCodes.OBSOLETE_StaleShardVersion, ErrorCodes.StaleEpoch];
// Create an $_internalSplitPipeline stage that forces the merge to occur on the Primary shard.
const forcePrimaryMerge = [{$_internalSplitPipeline: {mergeType: "primaryShard"}}];
diff --git a/src/mongo/base/error_codes.yml b/src/mongo/base/error_codes.yml
index fccba446c5c..809a76b89b9 100644
--- a/src/mongo/base/error_codes.yml
+++ b/src/mongo/base/error_codes.yml
@@ -96,7 +96,10 @@ error_codes:
- {code: 60,name: OBSOLETE_DatabaseNotFound}
- {code: 61,name: ShardKeyNotFound}
- {code: 62,name: OplogOperationUnsupported}
- - {code: 63,name: StaleShardVersion,categories: [StaleShardVersionError,NeedRetargettingError]}
+ # This error code is obsolete as of version 6.0 and no new places where it is thrown should be
+ # added. Use StaleConfig.
+ # TODO (SERVER-63327): Retire this code once 6.0 becomes LTS
+ - {code: 63,name: OBSOLETE_StaleShardVersion,categories: [StaleShardVersionError,NeedRetargettingError]}
- {code: 64,name: WriteConcernFailed,categories: [WriteConcernError]}
- {code: 65,name: MultipleErrorsOccurred, extra: MultipleErrorsOccurredInfo}
- {code: 66,name: ImmutableField}
@@ -187,6 +190,8 @@ error_codes:
- {code: 147,name: ZLibError}
- {code: 148,name: ReadConcernMajorityNotEnabled,categories: [VoteAbortError]}
- {code: 149,name: NoConfigPrimary}
+ # This error code is obsolete as of version 6.0 and no new places where it is thrown should be
+ # added. Use StaleConfig.
- {code: 150,name: StaleEpoch,
categories: [StaleShardVersionError,NeedRetargettingError],
extra: StaleEpochInfo,
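
For code written after this change, the comments above point at StaleConfig as the replacement for the retired code. Below is a minimal sketch of building the replacement error, using only the StaleConfigInfo constructor arguments that appear elsewhere in this patch (namespace, received version, optional wanted version, shard id); the helper function itself is hypothetical and not part of this commit.

    // Hypothetical helper, for illustration only: new call sites should produce
    // StaleConfig (with StaleConfigInfo as extra info) rather than the obsolete code.
    #include "mongo/s/stale_exception.h"

    namespace mongo {

    Status makeStaleConfigStatus(const NamespaceString& nss,
                                 const ChunkVersion& received,
                                 const boost::optional<ChunkVersion>& wanted,
                                 const ShardId& shardId) {
        // StaleConfigInfo carries the routing details the router needs in order to refresh.
        return Status{StaleConfigInfo(nss, received, wanted, shardId),
                      "shard version mismatch"};
    }

    }  // namespace mongo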
diff --git a/src/mongo/db/ops/write_ops.cpp b/src/mongo/db/ops/write_ops.cpp
index 14c5ca97aa0..5218c29e6eb 100644
--- a/src/mongo/db/ops/write_ops.cpp
+++ b/src/mongo/db/ops/write_ops.cpp
@@ -302,7 +302,7 @@ WriteError WriteError::parse(const BSONObj& obj) {
//
// TODO (SERVER-63327): This special parsing should be removed in the stable version
// following the resolution of this ticket.
- if (code == ErrorCodes::StaleShardVersion) {
+ if (code == ErrorCodes::OBSOLETE_StaleShardVersion) {
return Status(ErrorCodes::StaleConfig,
std::move(errmsg),
obj[WriteError::kErrInfoFieldName].Obj());
@@ -327,7 +327,8 @@ BSONObj WriteError::serialize() const {
// TODO (SERVER-63327): This special serialisation should be removed in the stable version
// following the resolution of this ticket.
if (_status == ErrorCodes::StaleConfig) {
- errBuilder.append(WriteError::kCodeFieldName, int32_t(ErrorCodes::StaleShardVersion));
+ errBuilder.append(WriteError::kCodeFieldName,
+ int32_t(ErrorCodes::OBSOLETE_StaleShardVersion));
errBuilder.append(WriteError::kErrmsgFieldName, _status.reason());
auto extraInfo = _status.extraInfo();
invariant(extraInfo);
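
The two TODO-marked blocks above form a wire-compatibility shim: on the way out, a StaleConfig write error is written with the legacy code so pre-6.0 routers still recognize it, and on the way in the legacy code is mapped back to StaleConfig. A rough sketch of that round trip follows, assuming the usual WriteError accessors (getStatus and the field-name constants used above); the wrapper function and its checks are illustrative, not part of this commit.

    // Illustration only (includes elided): what the shim above amounts to for one write error.
    void staleConfigRoundTrip(const write_ops::WriteError& staleConfigError) {
        // serialize() downgrades the code so older routers keep matching on the legacy code 63.
        BSONObj onTheWire = staleConfigError.serialize();
        invariant(onTheWire[write_ops::WriteError::kCodeFieldName].numberInt() ==
                  ErrorCodes::OBSOLETE_StaleShardVersion);

        // parse() upgrades it back, so in-memory consumers only ever see StaleConfig.
        auto parsed = write_ops::WriteError::parse(onTheWire);
        invariant(parsed.getStatus() == ErrorCodes::StaleConfig);
    }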
diff --git a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
index ebf3d147c8f..069a7e2f0b2 100644
--- a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
+++ b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp
@@ -167,13 +167,18 @@ TEST_F(DispatchShardPipelineTest, DispatchShardPipelineDoesNotRetryOnStaleConfig
ASSERT_THROWS_CODE(sharded_agg_helpers::dispatchShardPipeline(
serializedCommand, hasChangeStream, std::move(pipeline)),
AssertionException,
- ErrorCodes::StaleShardVersion);
+ ErrorCodes::StaleConfig);
});
// Mock out an error response.
onCommand([&](const executor::RemoteCommandRequest& request) {
- return createErrorCursorResponse(
- {Status{ErrorCodes::StaleShardVersion, "Mock error: shard version mismatch"}});
+ OID epoch{OID::gen()};
+ Timestamp timestamp{1, 0};
+ return createErrorCursorResponse({StaleConfigInfo(kTestAggregateNss,
+ ChunkVersion(1, 0, epoch, timestamp),
+ boost::none,
+ ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
future.default_timed_get();
}
@@ -205,20 +210,24 @@ TEST_F(DispatchShardPipelineTest, WrappedDispatchDoesRetryOnStaleConfigError) {
ASSERT(!bool(results.splitPipeline));
});
+ const OID epoch{OID::gen()};
+ const Timestamp timestamp{1, 0};
+ const UUID uuid{UUID::gen()};
+
// Mock out one error response, then expect a refresh of the sharding catalog for that
// namespace, then mock out a successful response.
onCommand([&](const executor::RemoteCommandRequest& request) {
- return createErrorCursorResponse(
- {ErrorCodes::StaleShardVersion, "Mock error: shard version mismatch"});
+ return createErrorCursorResponse({StaleConfigInfo(kTestAggregateNss,
+ ChunkVersion(2, 0, epoch, timestamp),
+ boost::none,
+ ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
// Mock the expected config server queries.
- const OID epoch = OID::gen();
- const UUID uuid = UUID::gen();
- const Timestamp timestamp(1);
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
- ChunkVersion version(1, 0, epoch, timestamp);
+ ChunkVersion version(2, 0, epoch, timestamp);
ChunkType chunk1(
uuid, {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << 0)}, version, {"0"});
@@ -229,6 +238,7 @@ TEST_F(DispatchShardPipelineTest, WrappedDispatchDoesRetryOnStaleConfigError) {
uuid, {BSON("_id" << 0), shardKeyPattern.getKeyPattern().globalMax()}, version, {"1"});
chunk2.setName(OID::gen());
version.incMinor();
+
expectCollectionAndChunksAggregation(
kTestAggregateNss, epoch, timestamp, uuid, shardKeyPattern, {chunk1, chunk2});
diff --git a/src/mongo/db/pipeline/sharded_union_test.cpp b/src/mongo/db/pipeline/sharded_union_test.cpp
index b385a7ff206..79863fc7f14 100644
--- a/src/mongo/db/pipeline/sharded_union_test.cpp
+++ b/src/mongo/db/pipeline/sharded_union_test.cpp
@@ -161,8 +161,12 @@ TEST_F(ShardedUnionTest, RetriesSubPipelineOnStaleConfigError) {
// Mock out one error response, then expect a refresh of the sharding catalog for that
// namespace, then mock out a successful response.
onCommand([&](const executor::RemoteCommandRequest& request) {
- return createErrorCursorResponse(
- Status{ErrorCodes::StaleShardVersion, "Mock error: shard version mismatch"});
+ OID epoch{OID::gen()};
+ Timestamp timestamp{1, 0};
+ return createErrorCursorResponse(Status{
+ StaleConfigInfo(
+ kTestAggregateNss, ChunkVersion(1, 0, epoch, timestamp), boost::none, ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
// Mock the expected config server queries.
@@ -239,8 +243,13 @@ TEST_F(ShardedUnionTest, CorrectlySplitsSubPipelineIfRefreshedDistributionRequir
// sharding catalog for that namespace.
onCommand([&](const executor::RemoteCommandRequest& request) {
ASSERT_EQ(request.target, HostAndPort(shards[1].getHost()));
- return createErrorCursorResponse(
- Status{ErrorCodes::StaleShardVersion, "Mock error: shard version mismatch"});
+
+ OID epoch{OID::gen()};
+ Timestamp timestamp{1, 0};
+ return createErrorCursorResponse(Status{
+ StaleConfigInfo(
+ kTestAggregateNss, ChunkVersion(1, 0, epoch, timestamp), boost::none, ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
// Mock the expected config server queries. Update the distribution as if a chunk [0, 10] was
@@ -324,20 +333,25 @@ TEST_F(ShardedUnionTest, AvoidsSplittingSubPipelineIfRefreshedDistributionDoesNo
// Mock out an error response from both shards, then expect a refresh of the sharding catalog
// for that namespace, then mock out a successful response.
+ OID epoch{OID::gen()};
+ Timestamp timestamp{1, 1};
+
onCommand([&](const executor::RemoteCommandRequest& request) {
- return createErrorCursorResponse(
- Status{ErrorCodes::StaleShardVersion, "Mock error: shard version mismatch"});
+ return createErrorCursorResponse(Status{
+ StaleConfigInfo(
+ kTestAggregateNss, ChunkVersion(1, 0, epoch, timestamp), boost::none, ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
onCommand([&](const executor::RemoteCommandRequest& request) {
- return createErrorCursorResponse(
- Status{ErrorCodes::StaleShardVersion, "Mock error: shard version mismatch"});
+ return createErrorCursorResponse(Status{
+ StaleConfigInfo(
+ kTestAggregateNss, ChunkVersion(1, 0, epoch, timestamp), boost::none, ShardId{"0"}),
+ "Mock error: shard version mismatch"});
});
// Mock the expected config server queries. Update the distribution so that all chunks are on
// the same shard.
- const OID epoch = OID::gen();
const UUID uuid = UUID::gen();
- const Timestamp timestamp(1, 1);
const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
ChunkVersion version(1, 0, epoch, timestamp);
ChunkType chunk1(
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index 6982d4c0650..f68c39ef0ce 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -181,28 +181,6 @@ bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk
return true;
}
-Status CollectionMetadata::checkRangeIsValid(const BSONObj& min, const BSONObj& max) const {
- invariant(isSharded());
-
- ChunkType existingChunk;
-
- if (!getNextChunk(min, &existingChunk)) {
- return {ErrorCodes::StaleShardVersion,
- str::stream() << "Chunk with bounds " << ChunkRange(min, max).toString()
- << " is not owned by this shard."};
- }
-
- const ChunkRange receivedRange(min, max);
- const auto owningRange = existingChunk.getRange();
-
- uassert(ErrorCodes::InvalidOptions,
- str::stream() << "Rejecting moveRange because the provided range is spanning "
- "across more than one chunk",
- owningRange.covers(receivedRange));
-
- return Status::OK();
-}
-
bool CollectionMetadata::currentShardHasAnyChunks() const {
invariant(isSharded());
std::set<ShardId> shards;
diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index be11da1918d..84ff44a6c90 100644
--- a/src/mongo/db/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -204,12 +204,6 @@ public:
bool getNextChunk(const BSONObj& lookupKey, ChunkType* chunk) const;
/**
- * Validates that the passed-in range's bounds belong to exactly one chunk in the metadata
- * cache.
- */
- Status checkRangeIsValid(const BSONObj& min, const BSONObj& max) const;
-
- /**
* Returns true if the argument range overlaps any chunk.
*/
bool rangeOverlapsChunk(const ChunkRange& range) const {
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index f090929c4e5..72568b34227 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -273,7 +273,7 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
<< ChunkRange(_args.getMinKey(), _args.getMaxKey()).toString()
<< " . The closest owned chunk is "
<< ChunkRange(existingChunk.getMin(), existingChunk.getMax()).toString(),
- collectionMetadata.checkRangeIsValid(_args.getMinKey(), _args.getMaxKey()).isOK());
+ existingChunk.getRange().covers(ChunkRange(_args.getMinKey(), _args.getMaxKey())));
_collectionEpoch = collectionVersion.epoch();
_collectionUUID = collectionUUID;
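
With CollectionMetadata::checkRangeIsValid() removed, the single-chunk containment check now happens inline through ChunkRange::covers(), as in the hunk above. A condensed sketch of that predicate with made-up bounds, only to show what the inlined assertion is checking:

    // Illustration only (includes elided); the bounds are arbitrary.
    void coversExample() {
        ChunkRange ownedChunk(BSON("_id" << 0), BSON("_id" << 100));
        ChunkRange requested(BSON("_id" << 10), BSON("_id" << 50));

        invariant(ownedChunk.covers(requested));  // fully inside the owned chunk: accepted
        invariant(!ownedChunk.covers(
            ChunkRange(BSON("_id" << 10), BSON("_id" << 150))));  // spans past it: rejected
    }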
diff --git a/src/mongo/s/append_raw_responses_test.cpp b/src/mongo/s/append_raw_responses_test.cpp
index 3ded061ce00..9bbc161bfb1 100644
--- a/src/mongo/s/append_raw_responses_test.cpp
+++ b/src/mongo/s/append_raw_responses_test.cpp
@@ -50,8 +50,6 @@ const Status kShardNotFoundStatus{ErrorCodes::ShardNotFound, "dummy"};
const Status kError1Status{ErrorCodes::HostUnreachable, "dummy"};
const Status kError2Status{ErrorCodes::HostUnreachable, "dummy"};
-const Status kStaleConfigErrorStatus{ErrorCodes::StaleShardVersion, "dummy"};
-
const Status kWriteConcernError1Status{ErrorCodes::WriteConcernFailed, "dummy"};
const Status kWriteConcernError2Status{ErrorCodes::UnsatisfiableWriteConcern, "dummy"};
@@ -195,6 +193,17 @@ protected:
const std::vector<ShardId> kShardIdList{kShard1, kShard2, kShard3, kShard4, kShard5};
+ const Status kStaleConfigErrorStatus{[] {
+ OID epoch{OID::gen()};
+ Timestamp timestamp{1, 0};
+ return StaleConfigInfo(
+ NamespaceString("Foo.Bar"),
+ ChunkVersion(1, 0, epoch, timestamp),
+ boost::none,
+ ShardId{"dummy"});
+ }(),
+ "dummy"};
+
private:
static void _assertShardIdsMatch(const std::set<ShardId>& expectedShardIds,
const std::set<ShardId>& actualShardIds) {
diff --git a/src/mongo/s/stale_exception.cpp b/src/mongo/s/stale_exception.cpp
index 3ea4c75816f..9f201a157b3 100644
--- a/src/mongo/s/stale_exception.cpp
+++ b/src/mongo/s/stale_exception.cpp
@@ -27,8 +27,6 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-
#include "mongo/s/stale_exception.h"
#include "mongo/base/init.h"
@@ -43,6 +41,44 @@ MONGO_INIT_REGISTER_ERROR_EXTRA_INFO(StaleDbRoutingVersion);
} // namespace
+void StaleConfigInfo::serialize(BSONObjBuilder* bob) const {
+ bob->append("ns", _nss.ns());
+ _received.appendLegacyWithField(bob, "vReceived");
+ if (_wanted) {
+ _wanted->appendLegacyWithField(bob, "vWanted");
+ }
+
+ invariant(_shardId != "");
+ bob->append("shardId", _shardId.toString());
+}
+
+std::shared_ptr<const ErrorExtraInfo> StaleConfigInfo::parse(const BSONObj& obj) {
+ return std::make_shared<StaleConfigInfo>(parseFromCommandError(obj));
+}
+
+StaleConfigInfo StaleConfigInfo::parseFromCommandError(const BSONObj& obj) {
+ const auto shardId = obj["shardId"].String();
+ invariant(shardId != "");
+
+ auto extractOptionalChunkVersion = [&obj](StringData field) -> boost::optional<ChunkVersion> {
+ try {
+ return boost::make_optional<ChunkVersion>(
+ ChunkVersion::fromBSONLegacyOrNewerFormat(obj, field));
+ } catch (const DBException& ex) {
+ auto status = ex.toStatus();
+ if (status != ErrorCodes::NoSuchKey) {
+ throw;
+ }
+ }
+ return boost::none;
+ };
+
+ return StaleConfigInfo(NamespaceString(obj["ns"].String()),
+ ChunkVersion::fromBSONLegacyOrNewerFormat(obj, "vReceived"),
+ extractOptionalChunkVersion("vWanted"),
+ ShardId(shardId));
+}
+
void StaleDbRoutingVersion::serialize(BSONObjBuilder* bob) const {
bob->append("db", _db);
bob->append("vReceived", _received.toBSON());
diff --git a/src/mongo/s/stale_exception.h b/src/mongo/s/stale_exception.h
index 70d7a0dc42e..d67e3415731 100644
--- a/src/mongo/s/stale_exception.h
+++ b/src/mongo/s/stale_exception.h
@@ -72,44 +72,9 @@ public:
return _criticalSectionSignal;
}
- void serialize(BSONObjBuilder* bob) const {
- bob->append("ns", _nss.ns());
- _received.appendLegacyWithField(bob, "vReceived");
- if (_wanted) {
- _wanted->appendLegacyWithField(bob, "vWanted");
- }
-
- invariant(_shardId != "");
- bob->append("shardId", _shardId.toString());
- }
-
- static std::shared_ptr<const ErrorExtraInfo> parse(const BSONObj& obj) {
- return std::make_shared<StaleConfigInfo>(parseFromCommandError(obj));
- }
-
- static StaleConfigInfo parseFromCommandError(const BSONObj& obj) {
- const auto shardId = obj["shardId"].String();
- invariant(shardId != "");
-
- auto extractOptionalChunkVersion =
- [&obj](StringData field) -> boost::optional<ChunkVersion> {
- try {
- return boost::make_optional<ChunkVersion>(
- ChunkVersion::fromBSONLegacyOrNewerFormat(obj, field));
- } catch (const DBException& ex) {
- auto status = ex.toStatus();
- if (status != ErrorCodes::NoSuchKey) {
- throw;
- }
- }
- return boost::none;
- };
-
- return StaleConfigInfo(NamespaceString(obj["ns"].String()),
- ChunkVersion::fromBSONLegacyOrNewerFormat(obj, "vReceived"),
- extractOptionalChunkVersion("vWanted"),
- ShardId(shardId));
- }
+ void serialize(BSONObjBuilder* bob) const;
+ static std::shared_ptr<const ErrorExtraInfo> parse(const BSONObj& obj);
+ static StaleConfigInfo parseFromCommandError(const BSONObj& obj);
protected:
NamespaceString _nss;
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index 99ed846f7f2..b61b8e01dbd 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -280,7 +280,7 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx,
if (responseStatus.isOK()) {
TrackedErrors trackedErrors;
trackedErrors.startTracking(ErrorCodes::StaleConfig);
- trackedErrors.startTracking(ErrorCodes::StaleShardVersion);
+ trackedErrors.startTracking(ErrorCodes::OBSOLETE_StaleShardVersion);
trackedErrors.startTracking(ErrorCodes::StaleDbVersion);
trackedErrors.startTracking(ErrorCodes::TenantMigrationAborted);
@@ -321,7 +321,7 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx,
auto staleConfigErrors = trackedErrors.getErrors(ErrorCodes::StaleConfig);
{
const auto& staleShardVersionErrors =
- trackedErrors.getErrors(ErrorCodes::StaleShardVersion);
+ trackedErrors.getErrors(ErrorCodes::OBSOLETE_StaleShardVersion);
staleConfigErrors.insert(staleConfigErrors.begin(),
staleShardVersionErrors.begin(),
staleShardVersionErrors.end());
@@ -440,6 +440,7 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx,
{logv2::LogComponent::kShardMigrationPerf},
"Finished post-migration commit refresh on the router with error",
"error"_attr = redact(ex));
+
// It's okay if we can't refresh, we'll just record errors for the ops if needed
LOGV2_WARNING(22911,
"Could not refresh targeter due to {error}",
diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp
index 36dcb474b78..bfda09f0814 100644
--- a/src/mongo/s/write_ops/batch_write_op_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_op_test.cpp
@@ -284,7 +284,15 @@ TEST_F(BatchWriteOpTest, SingleStaleError) {
BatchedCommandResponse response;
buildResponse(0, &response);
- addError(ErrorCodes::StaleShardVersion, "mock stale error", 0, &response);
+ OID epoch{OID::gen()};
+ Timestamp timestamp{1, 0};
+ response.addToErrDetails(
+ write_ops::WriteError(0,
+ Status{StaleConfigInfo(nss,
+ ChunkVersion(101, 200, epoch, timestamp),
+ ChunkVersion(105, 200, epoch, timestamp),
+ ShardId("shard")),
+ "mock stale error"}));
// First stale response comes back, we should retry
batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr);
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index 55fc49db16f..8e3a7157f6c 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -78,8 +78,8 @@ TEST(BatchedCommandResponseTest, StaleErrorAsStaleShardVersionCompatibility) {
staleInfo.serialize(&builder);
BSONArray writeErrorsArray(
- BSON_ARRAY(BSON("index" << 0 << "code" << ErrorCodes::StaleShardVersion << "errmsg"
- << "StaleShardVersion error"
+ BSON_ARRAY(BSON("index" << 0 << "code" << ErrorCodes::OBSOLETE_StaleShardVersion << "errmsg"
+ << "OBSOLETE_StaleShardVersion error"
<< "errInfo" << builder.obj())
<< BSON("index" << 1 << "code" << ErrorCodes::InvalidNamespace << "errmsg"
<< "index 1 failed too")));
diff --git a/src/mongo/s/write_ops/write_op.cpp b/src/mongo/s/write_ops/write_op.cpp
index f403714a020..c14af8181bc 100644
--- a/src/mongo/s/write_ops/write_op.cpp
+++ b/src/mongo/s/write_ops/write_op.cpp
@@ -36,8 +36,8 @@ namespace mongo {
namespace {
bool isRetryErrCode(int errCode) {
- return errCode == ErrorCodes::StaleShardVersion || errCode == ErrorCodes::StaleConfig ||
- errCode == ErrorCodes::StaleDbVersion ||
+ return errCode == ErrorCodes::OBSOLETE_StaleShardVersion ||
+ errCode == ErrorCodes::StaleConfig || errCode == ErrorCodes::StaleDbVersion ||
errCode == ErrorCodes::ShardCannotRefreshDueToLocksHeld ||
errCode == ErrorCodes::TenantMigrationAborted;
}
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index 827688cd6a9..884ffc906c3 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -226,7 +226,12 @@ TEST_F(WriteOpTest, TargetMultiAllShardsAndErrorSingleChildOp) {
// Simulate retryable error.
write_ops::WriteError retryableError(
- 0, {ErrorCodes::StaleShardVersion, "simulate ssv error for test"});
+ 0,
+ {StaleConfigInfo(kNss,
+ ChunkVersion(10, 0, OID(), Timestamp(1, 1)),
+ ChunkVersion(11, 0, OID(), Timestamp(1, 1)),
+ ShardId("shardA")),
+ "simulate ssv error for test"});
writeOp.noteWriteError(*targeted[0], retryableError);
// State should not change until we have result from all nodes.
@@ -322,7 +327,10 @@ TEST_F(WriteOpTest, RetrySingleOp) {
assertEndpointsEqual(targeted.front()->endpoint, endpoint);
// Stale exception
- write_ops::WriteError error(0, {ErrorCodes::StaleShardVersion, "some message"});
+ write_ops::WriteError error(
+ 0,
+ {StaleConfigInfo(kNss, ChunkVersion::IGNORED(), boost::none, ShardId("shard")),
+ "some message"});
writeOp.noteWriteError(*targeted.front(), error);
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
@@ -412,7 +420,12 @@ TEST_F(WriteOpTransactionTest, TargetMultiAllShardsAndErrorSingleChildOp) {
// Simulate retryable error.
write_ops::WriteError retryableError(
- 0, {ErrorCodes::StaleShardVersion, "simulate ssv error for test"});
+ 0,
+ {StaleConfigInfo(kNss,
+ ChunkVersion(10, 0, OID(), Timestamp(1, 1)),
+ ChunkVersion(11, 0, OID(), Timestamp(1, 1)),
+ ShardId("shardA")),
+ "simulate ssv error for test"});
writeOp.noteWriteError(*targeted[0], retryableError);
// State should change to error right away even with retryable error when in a transaction.