summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorsamantharitter <samantha.ritter@10gen.com>2017-08-16 11:11:04 -0400
committersamantharitter <samantha.ritter@10gen.com>2017-08-18 14:01:33 -0400
commit9999bc8a0915a34c3fd898f757008b68c5c780a2 (patch)
tree45ceecdcfb47a2b1c8901e2173ebcd660f3b8916
parent8c26bd3b1416ffb804f0f80785be25d2b04511b8 (diff)
downloadmongo-9999bc8a0915a34c3fd898f757008b68c5c780a2.tar.gz
SERVER-29203 Implement SessionsCollectionSharded
-rw-r--r--buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml1
-rw-r--r--buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml1
-rw-r--r--jstests/replsets/refresh_sessions_rs.js40
-rw-r--r--jstests/sharding/refresh_sessions.js81
-rw-r--r--src/mongo/db/SConscript17
-rw-r--r--src/mongo/db/db.cpp2
-rw-r--r--src/mongo/db/logical_session_cache_factory_mongod.cpp6
-rw-r--r--src/mongo/db/logical_session_cache_factory_mongos.cpp7
-rw-r--r--src/mongo/db/sessions_collection_sharded.cpp97
-rw-r--r--src/mongo/db/sessions_collection_sharded.h60
-rw-r--r--src/mongo/s/commands/SConscript2
-rw-r--r--src/mongo/s/write_ops/SConscript3
12 files changed, 282 insertions, 35 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
index 186ebc97775..566c23f0945 100644
--- a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
@@ -68,6 +68,7 @@ selector:
- jstests/sharding/listDatabases.js
- jstests/sharding/bulk_insert.js
- jstests/sharding/printShardingStatus.js
+ - jstests/sharding/refresh_sessions.js
# Balancer writes (direct write to config database with no retries)
- jstests/sharding/convert_to_and_from_sharded.js
- jstests/sharding/remove2.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
index c42fb1a7f1a..8bcc55f14f2 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
@@ -32,6 +32,7 @@ selector:
- jstests/sharding/key_rotation.js
- jstests/sharding/kill_sessions.js
- jstests/sharding/advance_logical_time_with_valid_signature.js
+ - jstests/sharding/refresh_sessions.js
- jstests/sharding/safe_secondary_reads_drop_recreate.js
- jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
- jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
diff --git a/jstests/replsets/refresh_sessions_rs.js b/jstests/replsets/refresh_sessions_rs.js
index cfe506b6c3d..abb71e22534 100644
--- a/jstests/replsets/refresh_sessions_rs.js
+++ b/jstests/replsets/refresh_sessions_rs.js
@@ -17,59 +17,57 @@
var server2 = replTest.liveNodes.slaves[0];
var server3 = replTest.liveNodes.slaves[1];
- var admin1 = primary.getDB(dbName);
- var admin2 = server2.getDB(dbName);
- var admin3 = server3.getDB(dbName);
+ var db1 = primary.getDB(dbName);
+ var db2 = server2.getDB(dbName);
+ var db3 = server3.getDB(dbName);
var res;
// Trigger an initial refresh on all members, as a sanity check.
- res = admin1.runCommand(refresh);
+ res = db1.runCommand(refresh);
assert.commandWorked(res, "failed to refresh");
- res = admin2.runCommand(refresh);
+ res = db2.runCommand(refresh);
assert.commandWorked(res, "failed to refresh");
- res = admin3.runCommand(refresh);
+ res = db3.runCommand(refresh);
assert.commandWorked(res, "failed to refresh");
// Connect to the primary and start a session.
- admin1.runCommand(startSession);
+ db1.runCommand(startSession);
assert.commandWorked(res, "unable to start session");
- // That session should not be in admin.system.sessions yet.
- assert.eq(admin1.system.sessions.count(), 0, "should not have session records yet");
+ // That session should not be in db.system.sessions yet.
+ assert.eq(db1.system.sessions.count(), 0, "should not have session records yet");
// Connect to each replica set member and start a session.
- res = admin2.runCommand(startSession);
+ res = db2.runCommand(startSession);
assert.commandWorked(res, "unable to start session");
- res = admin3.runCommand(startSession);
+ res = db3.runCommand(startSession);
assert.commandWorked(res, "unable to start session");
// Connect to a secondary and trigger a refresh.
- res = admin2.runCommand(refresh);
+ res = db2.runCommand(refresh);
assert.commandWorked(res, "failed to refresh");
// Connect to the primary. The sessions collection here should now contain one record.
- assert.eq(admin1.system.sessions.count(),
- 1,
- "refreshing on the secondary did not flush record to the primary");
+ assert.eq(db1.system.sessions.count(), 1, "did not flush refresh to the primary");
// Trigger a refresh on the primary. The sessions collection should now contain two records.
- res = admin1.runCommand(refresh);
+ res = db1.runCommand(refresh);
assert.commandWorked(res, "failed to refresh");
assert.eq(
- admin1.system.sessions.count(), 2, "should have two local session records after refresh");
+ db1.system.sessions.count(), 2, "should have two local session records after refresh");
// Trigger another refresh on all members.
- res = admin1.runCommand(refresh);
+ res = db1.runCommand(refresh);
assert.commandWorked(res, "failed to refresh");
- res = admin2.runCommand(refresh);
+ res = db2.runCommand(refresh);
assert.commandWorked(res, "failed to refresh");
- res = admin3.runCommand(refresh);
+ res = db3.runCommand(refresh);
assert.commandWorked(res, "failed to refresh");
// The sessions collection on the primary should now contain all records.
assert.eq(
- admin1.system.sessions.count(), 3, "should have three local session records after refresh");
+ db1.system.sessions.count(), 3, "should have three local session records after refresh");
// Stop the test.
replTest.stopSet();
diff --git a/jstests/sharding/refresh_sessions.js b/jstests/sharding/refresh_sessions.js
new file mode 100644
index 00000000000..0e575fe6a69
--- /dev/null
+++ b/jstests/sharding/refresh_sessions.js
@@ -0,0 +1,81 @@
+(function() {
+ "use strict";
+
+ var sessionsDb = "admin";
+ var refresh = {refreshLogicalSessionCacheNow: 1};
+ var startSession = {startSession: 1};
+
+ // Create a cluster with 2 shards.
+ var cluster = new ShardingTest({shards: 2});
+
+ // Test that we can refresh without any sessions, as a sanity check.
+ {
+ assert.commandWorked(cluster.s.getDB(sessionsDb).runCommand(refresh));
+ assert.commandWorked(cluster.shard0.getDB(sessionsDb).runCommand(refresh));
+ assert.commandWorked(cluster.shard1.getDB(sessionsDb).runCommand(refresh));
+ }
+
+ // Test that refreshing on mongos flushes local records to the collection.
+ {
+ var mongos = cluster.s.getDB(sessionsDb);
+ var sessionCount = mongos.system.sessions.count();
+
+ // Start one session.
+ assert.commandWorked(mongos.runCommand(startSession));
+ assert.commandWorked(mongos.runCommand(refresh));
+
+ // Test that it landed in the collection.
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 1,
+ "refresh on mongos did not flush session record");
+ }
+
+ // Test that refreshing on mongod flushes local records to the collection.
+ {
+ var mongos = cluster.s.getDB(sessionsDb);
+ var shard = cluster.shard0.getDB(sessionsDb);
+ var sessionCount = mongos.system.sessions.count();
+
+ assert.commandWorked(shard.runCommand(startSession));
+ assert.commandWorked(shard.runCommand(refresh));
+
+ // Test that the new record landed in the collection.
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 1,
+ "refresh on mongod did not flush session record");
+ }
+
+ // Test that refreshing on all servers flushes all records.
+ {
+ var mongos = cluster.s.getDB(sessionsDb);
+ var shard0 = cluster.shard0.getDB(sessionsDb);
+ var shard1 = cluster.shard1.getDB(sessionsDb);
+
+ var sessionCount = mongos.system.sessions.count();
+
+ assert.commandWorked(mongos.runCommand(startSession));
+ assert.commandWorked(shard0.runCommand(startSession));
+ assert.commandWorked(shard1.runCommand(startSession));
+
+ // All records should be in local caches only.
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount,
+ "startSession should not flush records to disk");
+
+ // Refresh on each server, see that it ups the session count.
+ assert.commandWorked(mongos.runCommand(refresh));
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 1,
+ "refresh on mongos did not flush session records to disk");
+
+ assert.commandWorked(shard0.runCommand(refresh));
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 2,
+ "refresh on shard did not flush session records to disk");
+
+ assert.commandWorked(shard1.runCommand(refresh));
+ assert.eq(mongos.system.sessions.count(),
+ sessionCount + 3,
+ "refresh on shard did not flush session records to disk");
+ }
+})();
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index e166bea6c81..85ac0195bb1 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -1027,6 +1027,19 @@ env.Library(
)
env.Library(
+ target='sessions_collection_sharded',
+ source=[
+ 'sessions_collection_sharded.cpp',
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
+ '$BUILD_DIR/mongo/s/write_ops/cluster_write_op',
+ 'logical_session_id',
+ 'sessions_collection',
+ ],
+)
+
+env.Library(
target='logical_session_cache',
source=[
'logical_session_cache.cpp',
@@ -1069,8 +1082,8 @@ envWithAsio.Library(
LIBDEPS=[
'logical_session_cache',
'service_liason_mongod',
- 'sessions_collection_mock', # TODO SERVER-29203
'sessions_collection_rs',
+ 'sessions_collection_sharded',
'sessions_collection_standalone',
],
)
@@ -1083,7 +1096,7 @@ envWithAsio.Library(
LIBDEPS=[
'logical_session_cache',
'service_liason_mongos',
- 'sessions_collection_mock', # TODO SERVER-29203 replace with sessions_collection_sharded
+ 'sessions_collection_sharded',
],
)
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 9defa4f328f..681d8319226 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -770,7 +770,7 @@ ExitCode _initAndListen(int listenPort) {
// Set up the logical session cache
LogicalSessionCacheServer kind = LogicalSessionCacheServer::kStandalone;
- if (shardingInitialized) {
+ if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
kind = LogicalSessionCacheServer::kSharded;
} else if (replSettings.usingReplSets()) {
kind = LogicalSessionCacheServer::kReplicaSet;
diff --git a/src/mongo/db/logical_session_cache_factory_mongod.cpp b/src/mongo/db/logical_session_cache_factory_mongod.cpp
index 52fc1ed1ce3..ee77bae1aa3 100644
--- a/src/mongo/db/logical_session_cache_factory_mongod.cpp
+++ b/src/mongo/db/logical_session_cache_factory_mongod.cpp
@@ -33,8 +33,8 @@
#include "mongo/db/logical_session_cache_factory_mongod.h"
#include "mongo/db/service_liason_mongod.h"
-#include "mongo/db/sessions_collection_mock.h"
#include "mongo/db/sessions_collection_rs.h"
+#include "mongo/db/sessions_collection_sharded.h"
#include "mongo/db/sessions_collection_standalone.h"
#include "mongo/stdx/memory.h"
@@ -45,9 +45,7 @@ namespace {
std::unique_ptr<SessionsCollection> makeSessionsCollection(LogicalSessionCacheServer state) {
switch (state) {
case LogicalSessionCacheServer::kSharded:
- // TODO SERVER-29203, replace with SessionsCollectionSharded
- return stdx::make_unique<MockSessionsCollection>(
- std::make_shared<MockSessionsCollectionImpl>());
+ return stdx::make_unique<SessionsCollectionSharded>();
case LogicalSessionCacheServer::kReplicaSet:
return stdx::make_unique<SessionsCollectionRS>();
case LogicalSessionCacheServer::kStandalone:
diff --git a/src/mongo/db/logical_session_cache_factory_mongos.cpp b/src/mongo/db/logical_session_cache_factory_mongos.cpp
index bfcefd13b41..e96319f8c48 100644
--- a/src/mongo/db/logical_session_cache_factory_mongos.cpp
+++ b/src/mongo/db/logical_session_cache_factory_mongos.cpp
@@ -34,17 +34,14 @@
#include "mongo/db/server_parameters.h"
#include "mongo/db/service_liason_mongos.h"
-#include "mongo/db/sessions_collection_mock.h"
+#include "mongo/db/sessions_collection_sharded.h"
#include "mongo/stdx/memory.h"
namespace mongo {
std::unique_ptr<LogicalSessionCache> makeLogicalSessionCacheS() {
auto liason = stdx::make_unique<ServiceLiasonMongos>();
-
- // TODO SERVER-29203, replace with SessionsCollectionSharded
- auto sessionsColl =
- stdx::make_unique<MockSessionsCollection>(std::make_shared<MockSessionsCollectionImpl>());
+ auto sessionsColl = stdx::make_unique<SessionsCollectionSharded>();
return stdx::make_unique<LogicalSessionCache>(
std::move(liason), std::move(sessionsColl), LogicalSessionCache::Options{});
diff --git a/src/mongo/db/sessions_collection_sharded.cpp b/src/mongo/db/sessions_collection_sharded.cpp
new file mode 100644
index 00000000000..46ad4ce5a21
--- /dev/null
+++ b/src/mongo/db/sessions_collection_sharded.cpp
@@ -0,0 +1,97 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/sessions_collection_sharded.h"
+
+#include "mongo/db/operation_context.h"
+#include "mongo/s/commands/cluster_write.h"
+#include "mongo/s/write_ops/batch_write_exec.h"
+#include "mongo/s/write_ops/batched_command_request.h"
+#include "mongo/s/write_ops/batched_command_response.h"
+#include "mongo/util/net/op_msg.h"
+
+namespace mongo {
+
+namespace {
+
+BSONObj lsidQuery(const LogicalSessionId& lsid) {
+ return BSON(LogicalSessionRecord::kIdFieldName << lsid.toBSON());
+}
+
+} // namespace
+
+Status SessionsCollectionSharded::refreshSessions(OperationContext* opCtx,
+ const LogicalSessionRecordSet& sessions,
+ Date_t refreshTime) {
+ auto send = [&](BSONObj toSend) {
+ auto opMsg = OpMsgRequest::fromDBAndBody(SessionsCollection::kSessionsDb, toSend);
+ auto request = BatchedCommandRequest::parseUpdate(opMsg);
+
+ BatchedCommandResponse response;
+ BatchWriteExecStats stats;
+
+ ClusterWriter::write(opCtx, request, &stats, &response);
+ if (response.getOk()) {
+ return Status::OK();
+ }
+
+ auto error = response.isErrCodeSet() ? ErrorCodes::fromInt(response.getErrCode())
+ : ErrorCodes::UnknownError;
+ return Status(error, response.getErrMessage());
+ };
+
+ return doRefresh(sessions, refreshTime, send);
+}
+
+Status SessionsCollectionSharded::removeRecords(OperationContext* opCtx,
+ const LogicalSessionIdSet& sessions) {
+ auto send = [&](BSONObj toSend) {
+ auto opMsg = OpMsgRequest::fromDBAndBody(SessionsCollection::kSessionsDb, toSend);
+ auto request = BatchedCommandRequest::parseDelete(opMsg);
+
+ BatchedCommandResponse response;
+ BatchWriteExecStats stats;
+
+ ClusterWriter::write(opCtx, request, &stats, &response);
+
+ if (response.getOk()) {
+ return Status::OK();
+ }
+
+ auto error = response.isErrCodeSet() ? ErrorCodes::fromInt(response.getErrCode())
+ : ErrorCodes::UnknownError;
+ return Status(error, response.getErrMessage());
+ };
+
+ return doRemove(sessions, send);
+}
+
+
+} // namespace mongo
diff --git a/src/mongo/db/sessions_collection_sharded.h b/src/mongo/db/sessions_collection_sharded.h
new file mode 100644
index 00000000000..ddf15ba11d6
--- /dev/null
+++ b/src/mongo/db/sessions_collection_sharded.h
@@ -0,0 +1,60 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include <memory>
+
+#include "mongo/db/logical_session_id.h"
+#include "mongo/db/sessions_collection.h"
+#include "mongo/util/time_support.h"
+
+namespace mongo {
+
+class OperationContext;
+
+/**
+ * Accesses the sessions collection for mongos and shard servers.
+ */
+class SessionsCollectionSharded : public SessionsCollection {
+public:
+ /**
+ * Updates the last-use times on the given sessions to be greater than
+ * or equal to the current time.
+ */
+ Status refreshSessions(OperationContext* opCtx,
+ const LogicalSessionRecordSet& sessions,
+ Date_t refreshTime) override;
+
+ /**
+ * Removes the authoritative records for the specified sessions.
+ */
+ Status removeRecords(OperationContext* opCtx, const LogicalSessionIdSet& sessions) override;
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/commands/SConscript b/src/mongo/s/commands/SConscript
index 75fb56c876b..d1eb29e7d4c 100644
--- a/src/mongo/s/commands/SConscript
+++ b/src/mongo/s/commands/SConscript
@@ -26,7 +26,6 @@ env.Library(
env.Library(
target='cluster_commands',
source=[
- 'chunk_manager_targeter.cpp',
'cluster_add_shard_cmd.cpp',
'cluster_add_shard_to_zone_cmd.cpp',
'cluster_aggregate.cpp',
@@ -77,7 +76,6 @@ env.Library(
'cluster_update_zone_key_range_cmd.cpp',
'cluster_user_management_commands.cpp',
'cluster_whats_my_uri_cmd.cpp',
- 'cluster_write.cpp',
'cluster_write_cmd.cpp',
'commands_public.cpp',
'kill_sessions_remote.cpp',
diff --git a/src/mongo/s/write_ops/SConscript b/src/mongo/s/write_ops/SConscript
index 03d110b32ce..f13ba6c096f 100644
--- a/src/mongo/s/write_ops/SConscript
+++ b/src/mongo/s/write_ops/SConscript
@@ -27,12 +27,15 @@ env.Library(
'write_op.cpp',
'batch_write_op.cpp',
'batch_write_exec.cpp',
+ '$BUILD_DIR/mongo/s/commands/chunk_manager_targeter.cpp',
+ '$BUILD_DIR/mongo/s/commands/cluster_write.cpp',
],
LIBDEPS=[
'batch_write_types',
'$BUILD_DIR/mongo/client/connection_string',
'$BUILD_DIR/mongo/s/async_requests_sender',
'$BUILD_DIR/mongo/s/client/sharding_client',
+ '$BUILD_DIR/mongo/s/commands/shared_cluster_commands',
'$BUILD_DIR/mongo/s/coreshard',
],
)