summaryrefslogtreecommitdiff
path: root/src/mongo/executor
diff options
context:
space:
mode:
authorGeorge Wangensteen <george.wangensteen@mongodb.com>2022-11-08 20:32:19 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-11-08 21:45:02 +0000
commit7a45a7307f2aac8389a0cb0a9b805e28686b4874 (patch)
treeb927914a7b999ff1708127d3c841a6a2296d2cd0 /src/mongo/executor
parent36133c3b97aed247975af7940fb3b14ac40b38cc (diff)
downloadmongo-7a45a7307f2aac8389a0cb0a9b805e28686b4874.tar.gz
SERVER-71062 Make async RPC API hedging implementation use dynamic retry delay
Diffstat (limited to 'src/mongo/executor')
-rw-r--r--src/mongo/executor/hedged_async_rpc.h2
-rw-r--r--src/mongo/executor/hedged_async_rpc_test.cpp42
2 files changed, 43 insertions, 1 deletions
diff --git a/src/mongo/executor/hedged_async_rpc.h b/src/mongo/executor/hedged_async_rpc.h
index 476b84b09aa..d503776afca 100644
--- a/src/mongo/executor/hedged_async_rpc.h
+++ b/src/mongo/executor/hedged_async_rpc.h
@@ -188,7 +188,7 @@ SemiFuture<AsyncRPCResponse<typename CommandType::Reply>> sendHedgedCommand(
return token.isCanceled() ||
!retryPolicy->recordAndEvaluateRetry(swResponse.getStatus());
})
- .withDelayBetweenIterations(retryPolicy->getNextRetryDelay())
+ .withBackoffBetweenIterations(detail::RetryDelayAsBackoff(retryPolicy.get()))
.on(exec, CancellationToken::uncancelable())
.onCompletion([hedgeCancellationToken](StatusWith<SingleResponse> result) mutable {
hedgeCancellationToken.cancel();
diff --git a/src/mongo/executor/hedged_async_rpc_test.cpp b/src/mongo/executor/hedged_async_rpc_test.cpp
index 18a557cf26a..73c06e42a50 100644
--- a/src/mongo/executor/hedged_async_rpc_test.cpp
+++ b/src/mongo/executor/hedged_async_rpc_test.cpp
@@ -774,6 +774,48 @@ TEST_F(HedgedAsyncRPCTest, HedgedFailsWithFatalErrorAuthoritativeCanceled) {
ASSERT_EQ(remoteError.getRemoteCommandResult(), fatalInternalErrorStatus);
}
+/*
+ * Tests that 'sendHedgedCommand' will appropriately retry multiple times under the conditions
+ * defined by the retry policy, with a dynamically changing wait-time between retries.
+ */
+TEST_F(HedgedAsyncRPCTest, DynamicDelayBetweenRetries) {
+ HelloCommandReply helloReply = HelloCommandReply(TopologyVersion(OID::gen(), 0));
+ HelloCommand helloCmd;
+ initializeCommand(helloCmd);
+
+ // Define a retry policy that simply decides to always retry a command three additional times,
+ // with a different delay between each retry.
+ std::shared_ptr<TestRetryPolicy> testPolicy = std::make_shared<TestRetryPolicy>();
+ const auto maxNumRetries = 3;
+ const std::array<Milliseconds, maxNumRetries> retryDelays{
+ Milliseconds(100), Milliseconds(50), Milliseconds(10)};
+ testPolicy->setMaxNumRetries(maxNumRetries);
+ testPolicy->pushRetryDelay(retryDelays[0]);
+ testPolicy->pushRetryDelay(retryDelays[1]);
+ testPolicy->pushRetryDelay(retryDelays[2]);
+
+ auto resultFuture = sendHedgedCommandWithHosts(helloCmd, kTwoHosts, testPolicy);
+
+ const auto onCommandFunc = [&](const auto& request) {
+ ASSERT(request.cmdObj["hello"]);
+ return helloReply.toBSON();
+ };
+
+ // Schedule 1 response to the initial attempt, and then two for the following retries.
+ // Advance the clock appropriately based on each retry delay.
+ for (auto i = 0; i < maxNumRetries; i++) {
+ scheduleRequestAndAdvanceClockForRetry(testPolicy, onCommandFunc, retryDelays[i]);
+ }
+ // Schedule a response to the final retry. No need to advance clock since no more
+ // retries should be attempted after this third one.
+ onCommand(onCommandFunc);
+
+ // Wait until the RPC attempt is done, including all retries. Ignore the result.
+ resultFuture.wait();
+
+ ASSERT_EQ(maxNumRetries, testPolicy->getNumRetriesPerformed());
+}
+
} // namespace
} // namespace async_rpc
} // namespace mongo