author    Ben Caimano <ben.caimano@10gen.com>  2019-03-11 11:28:47 -0400
committer Ben Caimano <ben.caimano@10gen.com>  2019-04-16 16:24:22 -0400
commit    d48f6caba6f97a578828d89ec2b95c41cbc5c67a (patch)
tree      7ec768d3f9c4287fcd028f403a41bdd0b87c1cb3 /src/mongo/s/server.cpp
parent    6fd5b4d17b87aa652381fd28cefe2a7eb5ec8d5d (diff)
download  mongo-d48f6caba6f97a578828d89ec2b95c41cbc5c67a.tar.gz
SERVER-39818 Split RSM notification functionality into a new class
Diffstat (limited to 'src/mongo/s/server.cpp')
-rw-r--r--  src/mongo/s/server.cpp  58
1 file changed, 54 insertions(+), 4 deletions(-)
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 9a8e6ebe71a..29c490ba8b0 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -122,6 +122,9 @@ using logger::LogComponent;
#define __has_feature(x) 0
#endif
+// Failpoint for disabling replicaSetChangeConfigServerUpdateHook calls on signaled mongos.
+MONGO_FAIL_POINT_DEFINE(failReplicaSetChangeConfigServerUpdateHook);
+
namespace {
#if defined(_WIN32)
@@ -341,6 +344,52 @@ void initWireSpec() {
spec.isInternalClient = true;
}
+class ShardingReplicaSetChangeListener final : public ReplicaSetChangeNotifier::Listener {
+public:
+ ShardingReplicaSetChangeListener(ServiceContext* serviceContext)
+ : _serviceContext(serviceContext) {}
+ ~ShardingReplicaSetChangeListener() final = default;
+
+ void onFoundSet(const Key& key) final {}
+
+ void onConfirmedSet(const State& state) final {
+ auto connStr = state.connStr;
+
+ auto fun = [ serviceContext = _serviceContext, connStr ](auto args) {
+ if (ErrorCodes::isCancelationError(args.status.code())) {
+ return;
+ }
+ uassertStatusOK(args.status);
+
+ LOG(0) << "Updating sharding state with confirmed set " << connStr;
+
+ Grid::get(serviceContext)->shardRegistry()->updateReplSetHosts(connStr);
+
+ if (MONGO_FAIL_POINT(failReplicaSetChangeConfigServerUpdateHook)) {
+ return;
+ }
+ ShardRegistry::updateReplicaSetOnConfigServer(serviceContext, connStr);
+ };
+
+ auto executor = Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor();
+ auto schedStatus = executor->scheduleWork(std::move(fun)).getStatus();
+ if (ErrorCodes::isCancelationError(schedStatus.code())) {
+ LOG(2) << "Unable to schedule confirmed set update due to " << schedStatus;
+ return;
+ }
+ uassertStatusOK(schedStatus);
+ }
+
+ void onPossibleSet(const State& state) final {
+ Grid::get(_serviceContext)->shardRegistry()->updateReplSetHosts(state.connStr);
+ }
+
+ void onDroppedSet(const Key& key) final {}
+
+private:
+ ServiceContext* _serviceContext;
+};
+
ExitCode runMongosServer(ServiceContext* serviceContext) {
Client::initThread("mongosMain");
printShardingVersionInfo(false);
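The hunk above replaces the old static hook function pointers with a subclass of ReplicaSetChangeNotifier::Listener. As a hedged illustration (not part of this commit), another component could subscribe to the same notifications with its own subclass. The name MyListener is hypothetical, and this assumes makeListener<T>() forwards constructor arguments (here, none); the four overridden methods and the registration call mirror the pattern this diff introduces:

    class MyListener final : public ReplicaSetChangeNotifier::Listener {
    public:
        void onFoundSet(const Key& key) final {}         // a new set was discovered
        void onConfirmedSet(const State& state) final {
            // React to a confirmed replica set config change, e.g. state.connStr.
        }
        void onPossibleSet(const State& state) final {}  // unconfirmed membership change
        void onDroppedSet(const Key& key) final {}       // the set is no longer monitored
    };

    // The returned handle owns the subscription; notifications stop when it is destroyed.
    auto handle = ReplicaSetMonitor::getNotifier().makeListener<MyListener>();
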
@@ -379,10 +428,11 @@ ExitCode runMongosServer(ServiceContext* serviceContext) {
shardConnectionPool.addHook(new ShardingConnectionHook(true, std::move(shardedHookList)));
- ReplicaSetMonitor::setAsynchronousConfigChangeHook(
- &ShardRegistry::replicaSetChangeConfigServerUpdateHook);
- ReplicaSetMonitor::setSynchronousConfigChangeHook(
- &ShardRegistry::replicaSetChangeShardRegistryUpdateHook);
+ // Hook up a Listener for changes from the ReplicaSetMonitor.
+ // It lasts for the scope of this function, i.e. until shutdown finishes.
+ auto shardingRSCL =
+ ReplicaSetMonitor::getNotifier().makeListener<ShardingReplicaSetChangeListener>(
+ serviceContext);
// Mongos connection pools already take care of authenticating new connections, so the
// replica set connection shouldn't need to.
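
The failReplicaSetChangeConfigServerUpdateHook failpoint defined at the top of this diff lets tests suppress the config server update while keeping the local ShardRegistry update. A minimal sketch of exercising it from a C++ test, assuming the standard FailPointEnableBlock RAII helper (from mongo/util/fail_point_service.h) applies to this failpoint as it does to others:

    {
        FailPointEnableBlock fpBlock("failReplicaSetChangeConfigServerUpdateHook");
        // While the failpoint is enabled, onConfirmedSet() still calls
        // updateReplSetHosts() on the local ShardRegistry but skips
        // ShardRegistry::updateReplicaSetOnConfigServer().
    }  // The failpoint is disabled again when fpBlock goes out of scope.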