summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2019-06-03 14:46:05 -0400
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2019-06-04 11:29:01 -0400
commit    0d26400300a2c007c825c5a718e5484036adf3c8 (patch)
tree      db60358d37f9273d985ba984ac4c9489ec5e54b0
parent    aeb8148c54ed9799e87b833d0fa3ba456e2c6719 (diff)
download  mongo-0d26400300a2c007c825c5a718e5484036adf3c8.tar.gz
SERVER-41468 Only shut down the LogicalSessionsCache if it has been created
(cherry picked from commit 5e998f021f3dc2f3da3da47d966f4411487c2058)
-rw-r--r--  jstests/sharding/shard_aware_init.js | 41
-rw-r--r--  src/mongo/s/server.cpp               | 34
2 files changed, 37 insertions(+), 38 deletions(-)
diff --git a/jstests/sharding/shard_aware_init.js b/jstests/sharding/shard_aware_init.js
index 14e53209895..ba302783838 100644
--- a/jstests/sharding/shard_aware_init.js
+++ b/jstests/sharding/shard_aware_init.js
@@ -70,16 +70,14 @@
};
// Simulate the upsert that is performed by a config server on addShard.
- var shardIdentityQuery = {
- _id: shardIdentityDoc._id,
- shardName: shardIdentityDoc.shardName,
- clusterId: shardIdentityDoc.clusterId,
- };
- var shardIdentityUpdate = {
- $set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}
- };
assert.writeOK(mongodConn.getDB('admin').system.version.update(
- shardIdentityQuery, shardIdentityUpdate, {upsert: true}));
+ {
+ _id: shardIdentityDoc._id,
+ shardName: shardIdentityDoc.shardName,
+ clusterId: shardIdentityDoc.clusterId,
+ },
+ {$set: {configsvrConnectionString: shardIdentityDoc.configsvrConnectionString}},
+ {upsert: true}));
var res = mongodConn.getDB('admin').runCommand({shardingState: 1});
@@ -146,20 +144,19 @@
var st = new ShardingTest({shards: 1});
- var mongod = MongoRunner.runMongod({shardsvr: ''});
-
- runTest(mongod, st.configRS.getURL());
-
- MongoRunner.stopMongod(mongod);
+ {
+ var mongod = MongoRunner.runMongod({shardsvr: ''});
+ runTest(mongod, st.configRS.getURL());
+ MongoRunner.stopMongod(mongod);
+ }
- var replTest = new ReplSetTest({nodes: 1});
- replTest.startSet({shardsvr: ''});
- replTest.initiate();
-
- runTest(replTest.getPrimary(), st.configRS.getURL());
-
- replTest.stopSet();
+ {
+ var replTest = new ReplSetTest({nodes: 1});
+ replTest.startSet({shardsvr: ''});
+ replTest.initiate();
+ runTest(replTest.getPrimary(), st.configRS.getURL());
+ replTest.stopSet();
+ }
st.stop();
-
})();
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index bbec88abfae..53293c690a5 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -190,7 +190,9 @@ void cleanupTask(ServiceContext* serviceContext) {
Client& client = cc();
// Join the logical session cache before the transport layer
- LogicalSessionCache::get(serviceContext)->joinOnShutDown();
+ if (auto lsc = LogicalSessionCache::get(serviceContext)) {
+ lsc->joinOnShutDown();
+ }
// Shutdown the TransportLayer so that new connections aren't accepted
if (auto tl = serviceContext->getTransportLayer()) {
@@ -396,7 +398,7 @@ private:
};
ExitCode runMongosServer(ServiceContext* serviceContext) {
- Client::initThread("mongosMain");
+ ThreadClient tc("mongosMain", serviceContext);
printShardingVersionInfo(false);
initWireSpec();
@@ -447,13 +449,13 @@ ExitCode runMongosServer(ServiceContext* serviceContext) {
quickExit(EXIT_BADOPTIONS);
}
- auto opCtx = cc().makeOperationContext();
+ LogicalClock::set(serviceContext, stdx::make_unique<LogicalClock>(serviceContext));
- auto logicalClock = stdx::make_unique<LogicalClock>(opCtx->getServiceContext());
- LogicalClock::set(opCtx->getServiceContext(), std::move(logicalClock));
+ auto opCtxHolder = tc->makeOperationContext();
+ auto const opCtx = opCtxHolder.get();
{
- Status status = initializeSharding(opCtx.get());
+ Status status = initializeSharding(opCtx);
if (!status.isOK()) {
if (status == ErrorCodes::CallbackCanceled) {
invariant(globalInShutdownDeprecated());
@@ -464,15 +466,15 @@ ExitCode runMongosServer(ServiceContext* serviceContext) {
return EXIT_SHARDING_ERROR;
}
- Grid::get(opCtx.get())
+ Grid::get(serviceContext)
->getBalancerConfiguration()
- ->refreshAndCheck(opCtx.get())
+ ->refreshAndCheck(opCtx)
.transitional_ignore();
}
startMongoSFTDC();
- Status status = AuthorizationManager::get(serviceContext)->initialize(opCtx.get());
+ Status status = AuthorizationManager::get(serviceContext)->initialize(opCtx);
if (!status.isOK()) {
error() << "Initializing authorization data failed: " << status;
return EXIT_SHARDING_ERROR;
@@ -486,17 +488,17 @@ ExitCode runMongosServer(ServiceContext* serviceContext) {
clusterCursorCleanupJob.go();
UserCacheInvalidator cacheInvalidatorThread(AuthorizationManager::get(serviceContext));
- {
- cacheInvalidatorThread.initialize(opCtx.get());
- cacheInvalidatorThread.go();
- }
+ cacheInvalidatorThread.initialize(opCtx);
+ cacheInvalidatorThread.go();
PeriodicTask::startRunningPeriodicTasks();
// Set up the periodic runner for background job execution
- auto runner = makePeriodicRunner(serviceContext);
- runner->startup();
- serviceContext->setPeriodicRunner(std::move(runner));
+ {
+ auto runner = makePeriodicRunner(serviceContext);
+ runner->startup();
+ serviceContext->setPeriodicRunner(std::move(runner));
+ }
SessionKiller::set(serviceContext,
std::make_shared<SessionKiller>(serviceContext, killSessionsRemote));