-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml  2
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml  1
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/shardedcluster.py  5
-rw-r--r--  jstests/auth/list_all_sessions.js  4
-rw-r--r--  jstests/libs/sessions_collection.js  33
-rw-r--r--  jstests/noPassthrough/sessions_collection_auto_healing.js  33
-rw-r--r--  jstests/noPassthrough/stepdown_query.js  4
-rw-r--r--  jstests/noPassthrough/system_indexes.js  13
-rw-r--r--  jstests/noPassthrough/transaction_reaper.js  3
-rw-r--r--  jstests/noPassthroughWithMongod/replica_set_shard_version.js  91
-rw-r--r--  jstests/replsets/refresh_sessions_rs.js  8
-rw-r--r--  jstests/replsets/sessions_collection_auto_healing.js  84
-rw-r--r--  jstests/sharding/count_slaveok.js  5
-rw-r--r--  jstests/sharding/group_slaveok.js  5
-rw-r--r--  jstests/sharding/read_pref.js  4
-rw-r--r--  jstests/sharding/rename.js  3
-rw-r--r--  jstests/sharding/sessions_collection_auto_healing.js  140
-rw-r--r--  jstests/sharding/shard_identity_config_update.js  5
-rw-r--r--  jstests/sharding/shard_kill_and_pooling.js  3
-rw-r--r--  jstests/sslSpecial/SERVER-26369.js  11
-rw-r--r--  src/mongo/db/SConscript  19
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp  4
-rw-r--r--  src/mongo/db/create_indexes.idl  88
-rw-r--r--  src/mongo/db/db.cpp  2
-rw-r--r--  src/mongo/db/logical_session_cache_factory_mongod.cpp  3
-rw-r--r--  src/mongo/db/logical_session_cache_factory_mongod.h  2
-rw-r--r--  src/mongo/db/logical_session_cache_impl.cpp  43
-rw-r--r--  src/mongo/db/logical_session_cache_test.cpp  7
-rw-r--r--  src/mongo/db/ops/insert.cpp  6
-rw-r--r--  src/mongo/db/sessions_collection.cpp  17
-rw-r--r--  src/mongo/db/sessions_collection.h  10
-rw-r--r--  src/mongo/db/sessions_collection_config_server.cpp  117
-rw-r--r--  src/mongo/db/sessions_collection_config_server.h  65
-rw-r--r--  src/mongo/db/sessions_collection_mock.h  4
-rw-r--r--  src/mongo/db/sessions_collection_rs.cpp  23
-rw-r--r--  src/mongo/db/sessions_collection_rs.h  5
-rw-r--r--  src/mongo/db/sessions_collection_sharded.cpp  23
-rw-r--r--  src/mongo/db/sessions_collection_sharded.h  9
-rw-r--r--  src/mongo/db/sessions_collection_standalone.cpp  12
-rw-r--r--  src/mongo/db/sessions_collection_standalone.h  5
-rw-r--r--  src/mongo/db/system_index.cpp  46
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp  23
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager.h  6
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp  37
-rw-r--r--  src/mongo/s/client/shard_registry.cpp  6
-rw-r--r--  src/mongo/s/client/shard_registry.h  2
-rw-r--r--  src/mongo/s/commands/cluster_remove_shard_cmd.cpp  39
-rw-r--r--  src/mongo/shell/shardingtest.js  14
48 files changed, 933 insertions, 161 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
index 9b6a4fa45f8..f0c180017c7 100644
--- a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
@@ -141,6 +141,8 @@ selector:
# Stepping down the config can cause moveChunks stopped on shards via killOp to be restarted.
- jstests/sharding/migration_ignore_interrupts_3.js
- jstests/sharding/migration_ignore_interrupts_4.js
+ # listCollections is not retryable
+ - jstests/sharding/sessions_collection_auto_healing.js
executor:
config:
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
index f92636ef968..34363ddd337 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
@@ -54,6 +54,7 @@ selector:
- jstests/sharding/secondary_shard_version_protocol_with_fcv.js
- jstests/sharding/secondary_shard_versioning.js
- jstests/sharding/session_info_in_oplog.js
+ - jstests/sharding/sessions_collection_auto_healing.js
- jstests/sharding/shard_config_db_collections.js
- jstests/sharding/uuid_propagated_to_config_server_on_shardCollection.js
- jstests/sharding/uuid_propagated_to_recipient_shard_on_recvChunkStart.js
diff --git a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
index 19ca140ba83..4f90d16f517 100644
--- a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
+++ b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
@@ -134,6 +134,11 @@ class ShardedClusterFixture(interface.Fixture):
self.logger.info("Enabling sharding for '%s' database...", db_name)
client.admin.command({"enablesharding": db_name})
+ # Ensure that the sessions collection gets auto-sharded by the config server
+ if self.configsvr is not None:
+ primary = self.configsvr.get_primary().mongo_client()
+ primary.admin.command({ "refreshLogicalSessionCacheNow" : 1 })
+
def _do_teardown(self):
"""
Shuts down the sharded cluster.
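For reference, a minimal shell sketch of the step the fixture now performs through PyMongo (assuming a ShardingTest named st, as in the jstests below):

    // Force the config server primary to refresh its logical session cache,
    // which shards config.system.sessions into existence.
    var configPrimary = st.configRS.getPrimary();
    assert.commandWorked(
        configPrimary.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));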
diff --git a/jstests/auth/list_all_sessions.js b/jstests/auth/list_all_sessions.js
index 2fd47476ee9..763178e9ac1 100644
--- a/jstests/auth/list_all_sessions.js
+++ b/jstests/auth/list_all_sessions.js
@@ -53,6 +53,10 @@
const st =
new ShardingTest({shards: 1, mongos: 1, config: 1, other: {keyFile: 'jstests/libs/key1'}});
+
+ // Ensure that the sessions collection exists.
+ st.c0.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
+
runListAllSessionsTest(st.s0);
st.stop();
})();
diff --git a/jstests/libs/sessions_collection.js b/jstests/libs/sessions_collection.js
new file mode 100644
index 00000000000..c25c72c8193
--- /dev/null
+++ b/jstests/libs/sessions_collection.js
@@ -0,0 +1,33 @@
+// Helpers for testing the logical sessions collection.
+
+/**
+ * Validates that the sessions collection exists if we expect it to,
+ * and has a TTL index on the lastUse field, if we expect it to.
+ */
+function validateSessionsCollection(conn, collectionExists, indexExists) {
+ var config = conn.getDB("config");
+
+ var info = config.getCollectionInfos({name: "system.sessions"});
+ var size = collectionExists ? 1 : 0;
+ assert.eq(info.length, size);
+
+ var indexes = config.system.sessions.getIndexes();
+ var found = false;
+ for (var i = 0; i < indexes.length; i++) {
+ var entry = indexes[i];
+ if (entry["name"] == "lsidTTLIndex") {
+ found = true;
+
+ assert.eq(entry["ns"], "config.system.sessions");
+ assert.eq(entry["key"], {"lastUse": 1});
+ assert(entry.hasOwnProperty("expireAfterSeconds"));
+ }
+ }
+
+ if (indexExists) {
+ assert(collectionExists);
+ assert(found, "expected sessions collection TTL index to exist");
+ } else {
+ assert(!found, "TTL index on sessions collection exists");
+ }
+}
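For illustration, the helper is meant to be called like this (a sketch mirroring the tests below, where conn is any connection to a node under test):

    load('jstests/libs/sessions_collection.js');

    // Collection and TTL index both present:
    validateSessionsCollection(conn, true, true);
    // Collection present but TTL index missing (e.g. after dropIndex):
    validateSessionsCollection(conn, true, false);
    // Neither exists yet:
    validateSessionsCollection(conn, false, false);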
diff --git a/jstests/noPassthrough/sessions_collection_auto_healing.js b/jstests/noPassthrough/sessions_collection_auto_healing.js
new file mode 100644
index 00000000000..9e2e2df3dc6
--- /dev/null
+++ b/jstests/noPassthrough/sessions_collection_auto_healing.js
@@ -0,0 +1,33 @@
+load('jstests/libs/sessions_collection.js');
+
+(function() {
+ "use strict";
+
+ var startSession = {startSession: 1};
+ var conn = MongoRunner.runMongod({nojournal: ""});
+
+ var admin = conn.getDB("admin");
+ var config = conn.getDB("config");
+
+ // Test that we can use sessions before the sessions collection exists.
+ {
+ validateSessionsCollection(conn, false, false);
+ assert.commandWorked(admin.runCommand({startSession: 1}));
+ validateSessionsCollection(conn, false, false);
+ }
+
+ // Test that a refresh will create the sessions collection.
+ {
+ assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(conn, true, true);
+ }
+
+ // Test that a refresh will (re)create the TTL index on the sessions collection.
+ {
+ assert.commandWorked(config.system.sessions.dropIndex({lastUse: 1}));
+ validateSessionsCollection(conn, true, false);
+ assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(conn, true, true);
+ }
+
+})();
diff --git a/jstests/noPassthrough/stepdown_query.js b/jstests/noPassthrough/stepdown_query.js
index 0faf9ecfe30..cce6499e043 100644
--- a/jstests/noPassthrough/stepdown_query.js
+++ b/jstests/noPassthrough/stepdown_query.js
@@ -2,6 +2,10 @@
* Tests that a query with default read preference ("primary") will succeed even if the node being
* queried steps down before the final result batch has been delivered.
*/
+
+// Checking UUID consistency involves talking to a shard node, which in this test is shut down
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
(function() {
'use strict';
diff --git a/jstests/noPassthrough/system_indexes.js b/jstests/noPassthrough/system_indexes.js
index 95f9d90c4a2..ddf291d4b56 100644
--- a/jstests/noPassthrough/system_indexes.js
+++ b/jstests/noPassthrough/system_indexes.js
@@ -65,17 +65,4 @@
db = conn.getDB("admin");
assert.eq(2, db.system.roles.getIndexes().length);
- // TEST: Inserting to the sessions collection creates indexes
- config = conn.getDB("config");
- assert.eq(0, config.system.sessions.getIndexes().length);
- config.system.sessions.insert({lastUse: new Date()});
- assert.eq(2, config.system.sessions.getIndexes().length);
-
- // TEST: Destroying config.system.sessions index and restarting will recreate it
- assert.commandWorked(config.system.sessions.dropIndexes());
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({restart: conn, cleanData: false});
- config = conn.getDB("config");
- assert.eq(2, config.system.sessions.getIndexes().length);
-
})();
diff --git a/jstests/noPassthrough/transaction_reaper.js b/jstests/noPassthrough/transaction_reaper.js
index 8fbaae0aed0..b777b80d6b3 100644
--- a/jstests/noPassthrough/transaction_reaper.js
+++ b/jstests/noPassthrough/transaction_reaper.js
@@ -36,6 +36,9 @@
this.st.s0.getDB("admin").runCommand({enableSharding: "test"});
this.st.s0.getDB("admin").runCommand({shardCollection: "test.test", key: {_id: 1}});
+
+ // Ensure that the sessions collection exists.
+ this.st.c0.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
}
Sharding.prototype.stop = function() {
diff --git a/jstests/noPassthroughWithMongod/replica_set_shard_version.js b/jstests/noPassthroughWithMongod/replica_set_shard_version.js
index b8fe681cc06..73c520c14ac 100644
--- a/jstests/noPassthroughWithMongod/replica_set_shard_version.js
+++ b/jstests/noPassthroughWithMongod/replica_set_shard_version.js
@@ -1,61 +1,64 @@
-// Tests whether a Replica Set in a mongos cluster can cause versioning problems
+/**
+ * Tests whether a Replica Set in a mongos cluster can cause versioning problems.
+ */
-jsTestLog("Starting sharded cluster...");
+// Checking UUID consistency involves talking to a shard node, which in this test has been
+// stepped down
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-var st = new ShardingTest({shards: 1, mongos: 2, other: {rs: true}});
+(function() {
+ 'use strict';
-// Uncomment to stop the balancer, since the balancer usually initializes the shard automatically
-// SERVER-4921 is otherwise hard to manifest
-// st.stopBalancer()
+ var st = new ShardingTest({shards: 1, mongos: 2, other: {rs: true, enableBalancer: true}});
-var mongosA = st.s0;
-var mongosB = st.s1;
-var shard = st.shard0;
+ var mongosA = st.s0;
+ var mongosB = st.s1;
+ var shard = st.shard0;
-coll = mongosA.getCollection(jsTestName() + ".coll");
+ var coll = mongosA.getCollection(jsTestName() + ".coll");
-// Wait for primary and then initialize shard SERVER-5130
-st.rs0.getPrimary();
-coll.findOne();
-
-var sadmin = shard.getDB("admin");
-assert.throws(function() {
- sadmin.runCommand({replSetStepDown: 3000, force: true});
-});
+ // Wait for primary and then initialize shard SERVER-5130
+ st.rs0.getPrimary();
+ coll.findOne();
-st.rs0.getPrimary();
+ var sadmin = shard.getDB("admin");
+ assert.throws(function() {
+ sadmin.runCommand({replSetStepDown: 3000, force: true});
+ });
-mongosA.getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
+ st.rs0.getPrimary();
-try {
- // This _almost_ always fails, unless the new primary is already detected. If if fails, it
- // should
- // mark the master as bad, so mongos will reload the replica set master next request
- // TODO: Can we just retry and succeed here?
- coll.findOne();
-} catch (e) {
- print("This error is expected : ");
- printjson(e);
-}
+ mongosA.getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
-jsTest.log("Running query which should succeed...");
+ try {
+ // This _almost_ always fails, unless the new primary is already detected. If it fails, it
+ // should mark the master as bad, so mongos will reload the replica set master next request.
+ //
+ // TODO: Can we just retry and succeed here?
+ coll.findOne();
+ } catch (e) {
+ print("This error is expected : ");
+ printjson(e);
+ }
-// This should always succeed without throwing an error
-coll.findOne();
+ jsTest.log("Running query which should succeed...");
-mongosA.getDB("admin").runCommand({setParameter: 1, traceExceptions: false});
+ // This should always succeed without throwing an error
+ coll.findOne();
-// now check secondary
+ mongosA.getDB("admin").runCommand({setParameter: 1, traceExceptions: false});
-assert.throws(function() {
- sadmin.runCommand({replSetStepDown: 3000, force: true});
-});
+ // Now check secondary
+ assert.throws(function() {
+ sadmin.runCommand({replSetStepDown: 3000, force: true});
+ });
-// Can't use the mongosB - SERVER-5128
-other = new Mongo(mongosA.host);
-other.setSlaveOk(true);
-other = other.getCollection(jsTestName() + ".coll");
+ // Can't use the mongosB - SERVER-5128
+ var other = new Mongo(mongosA.host);
+ other.setSlaveOk(true);
+ other = other.getCollection(jsTestName() + ".coll");
-print("eliot: " + tojson(other.findOne()));
+ print("eliot: " + tojson(other.findOne()));
-st.stop();
+ st.stop();
+})();
diff --git a/jstests/replsets/refresh_sessions_rs.js b/jstests/replsets/refresh_sessions_rs.js
index cc44affe6a2..2083c015515 100644
--- a/jstests/replsets/refresh_sessions_rs.js
+++ b/jstests/replsets/refresh_sessions_rs.js
@@ -48,8 +48,8 @@
res = db2.runCommand(refresh);
assert.commandWorked(res, "failed to refresh");
- // Connect to the primary. The sessions collection here should now contain one record.
- assert.eq(db1.system.sessions.count(), 1, "did not flush refresh to the primary");
+ // Connect to the primary. The sessions collection here should not yet contain records.
+ assert.eq(db1.system.sessions.count(), 0, "flushed refresh to the primary prematurely");
// Trigger a refresh on the primary. The sessions collection should now contain two records.
res = db1.runCommand(refresh);
@@ -58,12 +58,12 @@
db1.system.sessions.count(), 2, "should have two local session records after refresh");
// Trigger another refresh on all members.
- res = db1.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
res = db2.runCommand(refresh);
assert.commandWorked(res, "failed to refresh");
res = db3.runCommand(refresh);
assert.commandWorked(res, "failed to refresh");
+ res = db1.runCommand(refresh);
+ assert.commandWorked(res, "failed to refresh");
// The sessions collection on the primary should now contain all records.
assert.eq(
diff --git a/jstests/replsets/sessions_collection_auto_healing.js b/jstests/replsets/sessions_collection_auto_healing.js
new file mode 100644
index 00000000000..ef396737c2b
--- /dev/null
+++ b/jstests/replsets/sessions_collection_auto_healing.js
@@ -0,0 +1,84 @@
+load('jstests/libs/sessions_collection.js');
+
+(function() {
+ "use strict";
+
+ var replTest = new ReplSetTest({name: 'refresh', nodes: 3});
+ var nodes = replTest.startSet();
+
+ replTest.initiate();
+ var primary = replTest.getPrimary();
+ var primaryAdmin = primary.getDB("admin");
+
+ replTest.awaitSecondaryNodes();
+ var secondary = replTest.liveNodes.slaves[0];
+ var secondaryAdmin = secondary.getDB("admin");
+
+ // Test that we can use sessions on the primary
+ // before the sessions collection exists.
+ {
+ validateSessionsCollection(primary, false, false);
+
+ assert.commandWorked(primaryAdmin.runCommand({startSession: 1}));
+
+ validateSessionsCollection(primary, false, false);
+ }
+
+ // Test that we can use sessions on secondaries
+ // before the sessions collection exists.
+ {
+ validateSessionsCollection(primary, false, false);
+ validateSessionsCollection(secondary, false, false);
+
+ assert.commandWorked(secondaryAdmin.runCommand({startSession: 1}));
+
+ validateSessionsCollection(primary, false, false);
+ validateSessionsCollection(secondary, false, false);
+ }
+
+ // Test that a refresh on a secondary does not create the sessions
+ // collection, on either the secondary or the primary.
+ {
+ validateSessionsCollection(primary, false, false);
+ validateSessionsCollection(secondary, false, false);
+
+ assert.commandWorked(secondaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+
+ validateSessionsCollection(primary, false, false);
+ validateSessionsCollection(secondary, false, false);
+ }
+
+ // Test that a refresh on the primary creates the sessions collection.
+ {
+ validateSessionsCollection(primary, false, false);
+ validateSessionsCollection(secondary, false, false);
+
+ assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+
+ validateSessionsCollection(primary, true, true);
+ }
+
+ // Test that a refresh on a secondary will not create the
+ // TTL index on the sessions collection.
+ {
+ assert.commandWorked(primary.getDB("config").system.sessions.dropIndex({lastUse: 1}));
+
+ validateSessionsCollection(primary, true, false);
+
+ assert.commandWorked(secondaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+
+ validateSessionsCollection(primary, true, false);
+ }
+
+ // Test that a refresh on the primary will create the
+ // TTL index on the sessions collection.
+ {
+ validateSessionsCollection(primary, true, false);
+
+ assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+
+ validateSessionsCollection(primary, true, true);
+ }
+
+ replTest.stopSet();
+})();
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index df24e1418ed..f93ed7e0fa6 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -2,6 +2,10 @@
* Tests count and distinct using slaveOk. Also tests a scenario querying a set where only one
* secondary is up.
*/
+
+// Checking UUID consistency involves talking to a shard node, which in this test is shut down
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
(function() {
'use strict';
@@ -61,7 +65,6 @@
print("Should not reach here!");
assert(false);
-
} catch (e) {
print("Non-slaveOk'd connection failed.");
}
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
index 154a52128d8..3fb3d6272ae 100644
--- a/jstests/sharding/group_slaveok.js
+++ b/jstests/sharding/group_slaveok.js
@@ -1,6 +1,10 @@
/**
* Tests group using slaveOk.
*/
+
+// Checking UUID consistency involves talking to a shard node, which in this test is shut down
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
(function() {
'use strict';
@@ -66,5 +70,4 @@
}
st.stop();
-
})();
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index dcbf28676d1..2ca144099f0 100644
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -2,6 +2,10 @@
* Integration test for read preference and tagging. The more comprehensive unit test can be found
* in dbtests/replica_set_monitor_test.cpp.
*/
+
+// Checking UUID consistency involves talking to a shard node, which in this test is shut down
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
(function() {
'use strict';
diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js
index 9189e131359..e574dbb6c97 100644
--- a/jstests/sharding/rename.js
+++ b/jstests/sharding/rename.js
@@ -1,3 +1,6 @@
+// Checking UUID consistency involves talking to a shard node, which in this test is shut down
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
(function() {
'use strict';
diff --git a/jstests/sharding/sessions_collection_auto_healing.js b/jstests/sharding/sessions_collection_auto_healing.js
new file mode 100644
index 00000000000..0c7ab95fa42
--- /dev/null
+++ b/jstests/sharding/sessions_collection_auto_healing.js
@@ -0,0 +1,140 @@
+load('jstests/libs/sessions_collection.js');
+
+(function() {
+ "use strict";
+
+ var st = new ShardingTest({shards: 0});
+ var configSvr = st.configRS.getPrimary();
+ var configAdmin = configSvr.getDB("admin");
+
+ var mongos = st.s;
+ var mongosAdmin = mongos.getDB("admin");
+ var mongosConfig = mongos.getDB("config");
+
+ // Test that we can use sessions on the config server before we add any shards.
+ {
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(mongos, false, false);
+
+ assert.commandWorked(configAdmin.runCommand({startSession: 1}));
+
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(mongos, false, false);
+ }
+
+ // Test that we can use sessions on a mongos before we add any shards.
+ {
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(mongos, false, false);
+
+ assert.commandWorked(mongosAdmin.runCommand({startSession: 1}));
+
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(mongos, false, false);
+ }
+
+ // Test that the config server does not create the sessions collection
+ // if there are not any shards.
+ {
+ assert.eq(mongosConfig.shards.count(), 0);
+
+ assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+
+ validateSessionsCollection(configSvr, false, false);
+ }
+
+ // Test-wide: add a shard
+ var rs = new ReplSetTest({nodes: 1});
+ rs.startSet({shardsvr: ""});
+ rs.initiate();
+
+ var shard = rs.getPrimary();
+ var shardAdmin = shard.getDB("admin");
+ var shardConfig = shard.getDB("config");
+
+ // Test that we can add this shard, even with a local config.system.sessions collection,
+ // and test that we drop its local collection
+ {
+ shardConfig.system.sessions.insert({"hey": "you"});
+ validateSessionsCollection(shard, true, false);
+
+ assert.commandWorked(mongosAdmin.runCommand({addShard: rs.getURL()}));
+ assert.eq(mongosConfig.shards.count(), 1);
+ validateSessionsCollection(shard, false, false);
+ }
+
+ // Test that we can use sessions on a shard before the sessions collection
+ // is set up by the config servers.
+ {
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+
+ assert.commandWorked(shardAdmin.runCommand({startSession: 1}));
+
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+ }
+
+ // Test that we can use sessions from a mongos before the sessions collection
+ // is set up by the config servers.
+ {
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+ validateSessionsCollection(mongos, false, false);
+
+ assert.commandWorked(mongosAdmin.runCommand({startSession: 1}));
+
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+ validateSessionsCollection(mongos, false, false);
+ }
+
+ // Test that if we do a refresh (write) from a shard server while there
+ // is no sessions collection, it does not create the sessions collection.
+ {
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+
+ assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+
+ validateSessionsCollection(configSvr, false, false);
+ validateSessionsCollection(shard, false, false);
+ }
+
+ // Test that a refresh on the config servers once there are shards creates
+ // the sessions collection on a shard.
+ {
+ validateSessionsCollection(shard, false, false);
+
+ assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+
+ validateSessionsCollection(shard, true, true);
+
+ assert.eq(shardConfig.system.sessions.count(), 1, "did not flush config's sessions");
+
+ // Now, if we do refreshes on the other servers, their in-mem records will
+ // be written to the collection.
+ assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.eq(shardConfig.system.sessions.count(), 2, "did not flush shard's sessions");
+
+ assert.commandWorked(mongosAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.eq(shardConfig.system.sessions.count(), 4, "did not flush mongos' sessions");
+ }
+
+ // Test that if we drop the index on the sessions collection,
+ // refresh on neither the shard nor the config db heals it.
+ {
+ assert.commandWorked(shardConfig.system.sessions.dropIndex({lastUse: 1}));
+
+ validateSessionsCollection(shard, true, false);
+
+ assert.commandWorked(configAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(shard, true, false);
+
+ assert.commandWorked(shardAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(shard, true, false);
+ }
+
+ st.stop();
+
+})();
diff --git a/jstests/sharding/shard_identity_config_update.js b/jstests/sharding/shard_identity_config_update.js
index dccdd03c669..b7c3453134f 100644
--- a/jstests/sharding/shard_identity_config_update.js
+++ b/jstests/sharding/shard_identity_config_update.js
@@ -3,6 +3,10 @@
* primary and secondary will get updated whenever the config server membership changes.
* @tags: [requires_persistence]
*/
+
+// Checking UUID consistency involves talking to a shard node, which in this test is shut down
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
(function() {
"use strict";
@@ -98,5 +102,4 @@
});
st.stop();
-
})();
diff --git a/jstests/sharding/shard_kill_and_pooling.js b/jstests/sharding/shard_kill_and_pooling.js
index b8e1ae1c9af..aba43f8e2e7 100644
--- a/jstests/sharding/shard_kill_and_pooling.js
+++ b/jstests/sharding/shard_kill_and_pooling.js
@@ -6,6 +6,9 @@
* @tags: [requires_persistence]
*/
+// Checking UUID consistency involves talking to a shard node, which in this test is shut down
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
// Run through the same test twice, once with a hard -9 kill, once with a regular shutdown
(function() {
'use strict';
diff --git a/jstests/sslSpecial/SERVER-26369.js b/jstests/sslSpecial/SERVER-26369.js
index 96dd0d15183..065f646c3fc 100644
--- a/jstests/sslSpecial/SERVER-26369.js
+++ b/jstests/sslSpecial/SERVER-26369.js
@@ -1,11 +1,12 @@
-'use strict';
+// Checking UUID consistency involves talking to a shard node, which in this test is shut down
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
(function() {
+ 'use strict';
+
load("jstests/ssl/libs/ssl_helpers.js");
- var st = new ShardingTest({
- shards: {rs0: {nodes: 1}},
- mongos: 1,
- });
+ var st = new ShardingTest({shards: {rs0: {nodes: 1}}});
st.rs0.restart(0, {
sslMode: "allowSSL",
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index c7f3ce2a887..73a03f0e499 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -574,7 +574,6 @@ env.Library(
LIBDEPS=[
'db_raii',
'catalog/index_key_validate',
- 'logical_session_cache',
],
)
@@ -977,6 +976,7 @@ env.Library(
target='sessions_collection',
source=[
'sessions_collection.cpp',
+ env.Idlc('create_indexes.idl')[0],
],
LIBDEPS=[
'$BUILD_DIR/mongo/base',
@@ -1045,6 +1045,22 @@ env.Library(
)
env.Library(
+ target='sessions_collection_config_server',
+ source=[
+ 'sessions_collection_config_server.cpp',
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/s/client/sharding_client',
+ '$BUILD_DIR/mongo/s/commands/shared_cluster_commands',
+ '$BUILD_DIR/mongo/s/coreshard',
+ '$BUILD_DIR/mongo/s/sharding_request_types',
+ 'dbdirectclient',
+ 'sessions_collection',
+ 'sessions_collection_sharded',
+ ],
+)
+
+env.Library(
target='logical_session_cache',
source=[
'logical_session_cache.cpp',
@@ -1122,6 +1138,7 @@ envWithAsio.Library(
'logical_session_cache',
'logical_session_cache_impl',
'service_liason_mongod',
+ 'sessions_collection_config_server',
'sessions_collection_rs',
'sessions_collection_sharded',
'sessions_collection_standalone',
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index 87630ec52b1..b7d2df997ca 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -64,6 +64,7 @@
#include "mongo/db/server_options.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/sessions_collection.h"
#include "mongo/db/stats/top.h"
#include "mongo/db/storage/recovery_unit.h"
#include "mongo/db/storage/storage_engine.h"
@@ -451,7 +452,8 @@ Status DatabaseImpl::dropCollection(OperationContext* opCtx,
if (_profile != 0)
return Status(ErrorCodes::IllegalOperation,
"turn off profiling before dropping system.profile collection");
- } else if (!(nss.isSystemDotViews() || nss.isHealthlog())) {
+ } else if (!(nss.isSystemDotViews() || nss.isHealthlog() ||
+ nss == SessionsCollection::kSessionsNamespaceString)) {
return Status(ErrorCodes::IllegalOperation,
str::stream() << "can't drop system collection " << fullns);
}
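With this carve-out, the sessions collection can be dropped like an ordinary collection; a shell sketch:

    // Previously rejected with IllegalOperation ("can't drop system collection");
    // a later refresh can recreate the collection and its TTL index.
    var config = db.getSiblingDB("config");
    config.system.sessions.drop();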
diff --git a/src/mongo/db/create_indexes.idl b/src/mongo/db/create_indexes.idl
new file mode 100644
index 00000000000..decbf9e6c9b
--- /dev/null
+++ b/src/mongo/db/create_indexes.idl
@@ -0,0 +1,88 @@
+# Copyright (C) 2017 MongoDB Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License, version 3,
+# as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# This IDL file describes the BSON format for the createIndexes command,
+# and handles the serialization to and deserialization from its BSON
+# representation.
+
+global:
+ cpp_namespace: "mongo"
+
+imports:
+ - "mongo/idl/basic_types.idl"
+
+structs:
+
+ NewIndexSpec:
+ description: "A type representing a spec for a new index"
+ strict: true
+ fields:
+ key: object
+ name: string
+ background:
+ type: bool
+ optional: true
+ unique:
+ type: bool
+ optional: true
+ partialFilterExpression:
+ type: object
+ optional: true
+ sparse:
+ type: bool
+ optional: true
+ expireAfterSeconds:
+ type: int
+ optional: true
+ storageEngine:
+ type: object
+ optional: true
+ weights:
+ type: object
+ optional: true
+ default_language:
+ type: string
+ optional: true
+ language_override:
+ type: string
+ optional: true
+ textIndexVersion:
+ type: int
+ optional: true
+ 2dsphereIndexVersion:
+ type: int
+ optional: true
+ bits:
+ type: int
+ optional: true
+ min:
+ type: double
+ optional: true
+ max:
+ type: double
+ optional: true
+ bucketSize:
+ type: double
+ optional: true
+ collation:
+ type: object
+ optional: true
+
+ CreateIndexesCmd:
+ description: "A struct representing a createIndexes command"
+ strict: false
+ fields:
+ createIndexes: string
+ indexes: array<NewIndexSpec>
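A command document matching these structs would look like the following sketch (values illustrative; since CreateIndexesCmd is strict: false, extra top-level fields are tolerated):

    var cmd = {
        createIndexes: "system.sessions",
        indexes: [{key: {lastUse: 1}, name: "lsidTTLIndex", expireAfterSeconds: 1800}]
    };
    // db.getSiblingDB("config").runCommand(cmd);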
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 26fd4faab3b..d2cc915c0a0 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -804,6 +804,8 @@ ExitCode _initAndListen(int listenPort) {
LogicalSessionCacheServer kind = LogicalSessionCacheServer::kStandalone;
if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
kind = LogicalSessionCacheServer::kSharded;
+ } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+ kind = LogicalSessionCacheServer::kConfigServer;
} else if (replSettings.usingReplSets()) {
kind = LogicalSessionCacheServer::kReplicaSet;
}
diff --git a/src/mongo/db/logical_session_cache_factory_mongod.cpp b/src/mongo/db/logical_session_cache_factory_mongod.cpp
index 0fdc1135d56..2672b41a4cc 100644
--- a/src/mongo/db/logical_session_cache_factory_mongod.cpp
+++ b/src/mongo/db/logical_session_cache_factory_mongod.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/logical_session_cache_impl.h"
#include "mongo/db/service_liason_mongod.h"
+#include "mongo/db/sessions_collection_config_server.h"
#include "mongo/db/sessions_collection_rs.h"
#include "mongo/db/sessions_collection_sharded.h"
#include "mongo/db/sessions_collection_standalone.h"
@@ -51,6 +52,8 @@ std::shared_ptr<SessionsCollection> makeSessionsCollection(LogicalSessionCacheSe
switch (state) {
case LogicalSessionCacheServer::kSharded:
return std::make_shared<SessionsCollectionSharded>();
+ case LogicalSessionCacheServer::kConfigServer:
+ return std::make_shared<SessionsCollectionConfigServer>();
case LogicalSessionCacheServer::kReplicaSet:
return std::make_shared<SessionsCollectionRS>();
case LogicalSessionCacheServer::kStandalone:
diff --git a/src/mongo/db/logical_session_cache_factory_mongod.h b/src/mongo/db/logical_session_cache_factory_mongod.h
index b6ac0430fd0..faee5c56a1a 100644
--- a/src/mongo/db/logical_session_cache_factory_mongod.h
+++ b/src/mongo/db/logical_session_cache_factory_mongod.h
@@ -35,7 +35,7 @@
namespace mongo {
-enum class LogicalSessionCacheServer { kSharded, kReplicaSet, kStandalone };
+enum class LogicalSessionCacheServer { kSharded, kConfigServer, kReplicaSet, kStandalone };
class ServiceContext;
diff --git a/src/mongo/db/logical_session_cache_impl.cpp b/src/mongo/db/logical_session_cache_impl.cpp
index e52285edbbe..6cb5287f32b 100644
--- a/src/mongo/db/logical_session_cache_impl.cpp
+++ b/src/mongo/db/logical_session_cache_impl.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/db/logical_session_id_helpers.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/server_options.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
@@ -115,8 +116,6 @@ Status LogicalSessionCacheImpl::refreshSessions(OperationContext* opCtx,
Status LogicalSessionCacheImpl::refreshSessions(OperationContext* opCtx,
const RefreshSessionsCmdFromClusterMember& cmd) {
- LogicalSessionRecordSet toRefresh{};
-
// Update the timestamps of all these records in our cache.
auto records = cmd.getRefreshSessionsInternal();
for (const auto& record : records) {
@@ -124,11 +123,9 @@ Status LogicalSessionCacheImpl::refreshSessions(OperationContext* opCtx,
// This is a new record, insert it.
_addToCache(record);
}
- toRefresh.insert(record);
}
- // Write to the sessions collection now.
- return _sessionsColl->refreshSessions(opCtx, toRefresh);
+ return Status::OK();
}
void LogicalSessionCacheImpl::vivify(OperationContext* opCtx, const LogicalSessionId& lsid) {
@@ -201,6 +198,30 @@ Status LogicalSessionCacheImpl::_reap(Client* client) {
}
void LogicalSessionCacheImpl::_refresh(Client* client) {
+ // Do not run this job if we are not in FCV 3.6
+ if (!serverGlobalParams.featureCompatibility.isFullyUpgradedTo36()) {
+ LOG(1) << "Skipping session refresh job while feature compatibility version is not 3.6";
+ return;
+ }
+
+ // get or make an opCtx
+ boost::optional<ServiceContext::UniqueOperationContext> uniqueCtx;
+ auto* const opCtx = [&client, &uniqueCtx] {
+ if (client->getOperationContext()) {
+ return client->getOperationContext();
+ }
+
+ uniqueCtx.emplace(client->makeOperationContext());
+ return uniqueCtx->get();
+ }();
+
+ auto res = _sessionsColl->setupSessionsCollection(opCtx);
+ if (!res.isOK()) {
+ log() << "Sessions collection is not set up; "
+ << "waiting until next sessions refresh interval: " << res.reason();
+ return;
+ }
+
LogicalSessionIdSet staleSessions;
LogicalSessionIdSet explicitlyEndingSessions;
LogicalSessionIdMap<LogicalSessionRecord> activeSessions;
@@ -229,18 +250,6 @@ void LogicalSessionCacheImpl::_refresh(Client* client) {
auto activeSessionsBackSwapper = backSwapper(_activeSessions, activeSessions);
auto explicitlyEndingBackSwaper = backSwapper(_endingSessions, explicitlyEndingSessions);
- // get or make an opCtx
-
- boost::optional<ServiceContext::UniqueOperationContext> uniqueCtx;
- auto* const opCtx = [&client, &uniqueCtx] {
- if (client->getOperationContext()) {
- return client->getOperationContext();
- }
-
- uniqueCtx.emplace(client->makeOperationContext());
- return uniqueCtx->get();
- }();
-
// remove all explicitlyEndingSessions from activeSessions
for (const auto& lsid : explicitlyEndingSessions) {
activeSessions.erase(lsid);
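Net effect of the reordering: every refresh now begins by checking the FCV gate and ensuring the collection is set up, before any cache state is swapped out. A hypothetical shell sequence to observe the gate (assuming conn is a direct connection to a test mongod):

    // While FCV is 3.4, the refresh job logs at verbosity 1 and returns
    // without touching config.system.sessions.
    var admin = conn.getDB("admin");
    assert.commandWorked(admin.runCommand({setFeatureCompatibilityVersion: "3.4"}));
    assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
    assert.eq(0, conn.getDB("config").system.sessions.count());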
diff --git a/src/mongo/db/logical_session_cache_test.cpp b/src/mongo/db/logical_session_cache_test.cpp
index 9001ab59a9a..5e96394a515 100644
--- a/src/mongo/db/logical_session_cache_test.cpp
+++ b/src/mongo/db/logical_session_cache_test.cpp
@@ -45,6 +45,7 @@
#include "mongo/db/sessions_collection_mock.h"
#include "mongo/stdx/future.h"
#include "mongo/stdx/memory.h"
+#include "mongo/unittest/ensure_fcv.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -55,6 +56,7 @@ const Milliseconds kForceRefresh =
duration_cast<Milliseconds>(LogicalSessionCacheImpl::kLogicalSessionDefaultRefresh);
using SessionList = std::list<LogicalSessionId>;
+using unittest::EnsureFCV;
/**
* Test fixture that sets up a session cache attached to a mock service liason
@@ -64,7 +66,8 @@ class LogicalSessionCacheTest : public unittest::Test {
public:
LogicalSessionCacheTest()
: _service(std::make_shared<MockServiceLiasonImpl>()),
- _sessions(std::make_shared<MockSessionsCollectionImpl>()) {}
+ _sessions(std::make_shared<MockSessionsCollectionImpl>()),
+ _fcv(EnsureFCV::Version::k36) {}
void setUp() override {
auto localManagerState = stdx::make_unique<AuthzManagerExternalStateMock>();
@@ -137,6 +140,8 @@ private:
std::unique_ptr<LogicalSessionCache> _cache;
Client* _client;
+
+ EnsureFCV _fcv;
};
// Test that the getFromCache method does not make calls to the sessions collection
diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp
index de02fda5642..8ed9abbd634 100644
--- a/src/mongo/db/ops/insert.cpp
+++ b/src/mongo/db/ops/insert.cpp
@@ -261,6 +261,12 @@ Status userAllowedCreateNS(StringData db, StringData coll) {
// some special rules
if (coll.find(".system.") != string::npos) {
+ // If this is metadata for the sessions collection, shard servers need to be able to
+ // write to it.
+ if (coll.find(".system.sessions") != string::npos) {
+ return Status::OK();
+ }
+
// this matches old (2.4 and older) behavior, but I'm not sure it's a good idea
return Status(ErrorCodes::BadValue,
str::stream() << "cannot write to '" << db << "." << coll << "'");
diff --git a/src/mongo/db/sessions_collection.cpp b/src/mongo/db/sessions_collection.cpp
index 69e420f40b8..ec0d2f43a8b 100644
--- a/src/mongo/db/sessions_collection.cpp
+++ b/src/mongo/db/sessions_collection.cpp
@@ -35,6 +35,7 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/client/dbclientinterface.h"
+#include "mongo/db/create_indexes_gen.h"
#include "mongo/db/logical_session_id.h"
#include "mongo/db/ops/write_ops.h"
#include "mongo/db/refresh_sessions_gen.h"
@@ -204,6 +205,7 @@ Status SessionsCollection::doRefresh(const NamespaceString& ns,
auto init = [ns](BSONObjBuilder* batch) {
batch->append("update", ns.coll());
batch->append("ordered", false);
+ batch->append("allowImplicitCollectionCreation", false);
};
auto add = [](BSONArrayBuilder* entries, const LogicalSessionRecord& record) {
@@ -309,4 +311,19 @@ StatusWith<LogicalSessionIdSet> SessionsCollection::doFetch(const NamespaceStrin
return removed;
}
+BSONObj SessionsCollection::generateCreateIndexesCmd() {
+ NewIndexSpec index;
+ index.setKey(BSON("lastUse" << 1));
+ index.setName("lsidTTLIndex");
+ index.setExpireAfterSeconds(localLogicalSessionTimeoutMinutes * 60);
+
+ std::vector<NewIndexSpec> indexes;
+ indexes.push_back(std::move(index));
+
+ CreateIndexesCmd createIndexes;
+ createIndexes.setCreateIndexes(kSessionsCollection.toString());
+ createIndexes.setIndexes(std::move(indexes));
+
+ return createIndexes.toBSON();
+}
} // namespace mongo
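In shell terms, doRefresh now marks its update batches so they can no longer implicitly create the collection; if config.system.sessions is missing, the write fails instead. A sketch of the resulting batch (lsid value illustrative; real lsids also carry a user digest):

    var someLsid = {id: UUID()};  // illustrative only
    var refreshBatch = {
        update: "system.sessions",
        ordered: false,
        allowImplicitCollectionCreation: false,  // new: fail rather than create
        updates: [{q: {_id: someLsid}, u: {$currentDate: {lastUse: true}}, upsert: true}]
    };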
diff --git a/src/mongo/db/sessions_collection.h b/src/mongo/db/sessions_collection.h
index 63fd34667cf..3f6baaac2d0 100644
--- a/src/mongo/db/sessions_collection.h
+++ b/src/mongo/db/sessions_collection.h
@@ -56,6 +56,11 @@ public:
static const NamespaceString kSessionsNamespaceString;
/**
+ * Ensures that the sessions collection exists and has the proper indexes.
+ */
+ virtual Status setupSessionsCollection(OperationContext* opCtx) = 0;
+
+ /**
* Updates the last-use times on the given sessions to be greater than
* or equal to the given time. Returns an error if a networking issue occurred.
*/
@@ -83,6 +88,11 @@ public:
virtual StatusWith<LogicalSessionIdSet> findRemovedSessions(
OperationContext* opCtx, const LogicalSessionIdSet& sessions) = 0;
+ /**
+ * Generates a createIndexes command for the sessions collection TTL index.
+ */
+ static BSONObj generateCreateIndexesCmd();
+
protected:
/**
* Makes a send function for the given client.
diff --git a/src/mongo/db/sessions_collection_config_server.cpp b/src/mongo/db/sessions_collection_config_server.cpp
new file mode 100644
index 00000000000..9051a73f849
--- /dev/null
+++ b/src/mongo/db/sessions_collection_config_server.cpp
@@ -0,0 +1,117 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kControl
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/sessions_collection_config_server.h"
+
+#include "mongo/client/query.h"
+#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/logical_session_id.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/rpc/get_status_from_command_result.h"
+#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/commands/cluster_commands_helpers.h"
+#include "mongo/s/grid.h"
+#include "mongo/s/request_types/shard_collection_gen.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+
+// Returns an error if the collection didn't exist and we couldn't
+// shard it into existence, either.
+Status SessionsCollectionConfigServer::_shardCollectionIfNeeded(OperationContext* opCtx) {
+ // First, check if the collection is already sharded.
+ auto res = _checkCacheForSessionsCollection(opCtx);
+ if (res.isOK()) {
+ return res;
+ }
+
+ // If we don't have any shards, we can't set up this collection yet.
+ if (Grid::get(opCtx)->shardRegistry()->getNumShards() == 0) {
+ return {ErrorCodes::ShardNotFound,
+ "Cannot create config.system.sessions until there are shards"};
+ }
+
+ // Otherwise, shard the sessions collection to create it.
+ ConfigsvrShardCollectionRequest shardCollection;
+ shardCollection.set_configsvrShardCollection(
+ NamespaceString(SessionsCollection::kSessionsFullNS.toString()));
+ shardCollection.setKey(BSON("_id" << 1));
+
+ DBDirectClient client(opCtx);
+ BSONObj info;
+ if (!client.runCommand("admin", shardCollection.toBSON(), info)) {
+ return getStatusFromCommandResult(info);
+ }
+
+ return Status::OK();
+}
+
+Status SessionsCollectionConfigServer::_generateIndexesIfNeeded(OperationContext* opCtx) {
+ auto res =
+ scatterGatherOnlyVersionIfUnsharded(opCtx,
+ SessionsCollection::kSessionsDb.toString(),
+ NamespaceString(SessionsCollection::kSessionsFullNS),
+ SessionsCollection::generateCreateIndexesCmd(),
+ ReadPreferenceSetting::get(opCtx),
+ Shard::RetryPolicy::kNoRetry);
+ return res.getStatus();
+}
+
+Status SessionsCollectionConfigServer::setupSessionsCollection(OperationContext* opCtx) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ {
+ // Keep trying to set up this collection until we have done so successfully once.
+ // Note: if there is a config server election, it's possible that two different
+ // primaries could both run the createIndexes scatter-gather query; this is ok.
+ if (_collectionSetUp) {
+ return Status::OK();
+ }
+
+ auto res = _shardCollectionIfNeeded(opCtx);
+ if (!res.isOK()) {
+ log() << "Failed to create config.system.sessions: " << res.reason()
+ << ", will try again at the next refresh interval";
+ return res;
+ }
+
+ res = _generateIndexesIfNeeded(opCtx);
+ if (!res.isOK()) {
+ log() << "Failed to generate TTL index for config.system.sessions on all shards, "
+ << "will try again on the next refresh interval";
+ }
+
+ _collectionSetUp = true;
+ return res;
+ }
+}
+
+} // namespace mongo
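Functionally, the config server's setup path corresponds to these two commands (a sketch assuming a mongos connection and the default 30-minute session timeout; the real code issues _configsvrShardCollection through a DBDirectClient and scatter-gathers createIndexes to the shards):

    var admin = mongos.getDB("admin");
    // 1. Shard config.system.sessions on _id, creating it if needed.
    assert.commandWorked(
        admin.runCommand({shardCollection: "config.system.sessions", key: {_id: 1}}));
    // 2. Build the TTL index on the shards that own the collection.
    assert.commandWorked(mongos.getDB("config").runCommand({
        createIndexes: "system.sessions",
        indexes: [{key: {lastUse: 1}, name: "lsidTTLIndex", expireAfterSeconds: 1800}]
    }));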
diff --git a/src/mongo/db/sessions_collection_config_server.h b/src/mongo/db/sessions_collection_config_server.h
new file mode 100644
index 00000000000..2de7c109e41
--- /dev/null
+++ b/src/mongo/db/sessions_collection_config_server.h
@@ -0,0 +1,65 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include <memory>
+
+#include "mongo/db/logical_session_id.h"
+#include "mongo/db/sessions_collection_sharded.h"
+#include "mongo/stdx/mutex.h"
+#include "mongo/util/time_support.h"
+
+namespace mongo {
+
+class OperationContext;
+
+/**
+ * Accesses the sessions collection for config servers.
+ */
+class SessionsCollectionConfigServer : public SessionsCollectionSharded {
+public:
+ /**
+ * Ensures that the sessions collection has been set up for this cluster,
+ * sharded, and with the proper indexes.
+ *
+ * This method may safely be called multiple times.
+ *
+ * If there are no shards in this cluster, this method will do nothing.
+ */
+ Status setupSessionsCollection(OperationContext* opCtx) override;
+
+private:
+ Status _shardCollectionIfNeeded(OperationContext* opCtx);
+ Status _generateIndexesIfNeeded(OperationContext* opCtx);
+
+ stdx::mutex _mutex;
+ bool _collectionSetUp{false};
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/sessions_collection_mock.h b/src/mongo/db/sessions_collection_mock.h
index fb20533ed60..ff2c536467c 100644
--- a/src/mongo/db/sessions_collection_mock.h
+++ b/src/mongo/db/sessions_collection_mock.h
@@ -104,6 +104,10 @@ public:
explicit MockSessionsCollection(std::shared_ptr<MockSessionsCollectionImpl> impl)
: _impl(std::move(impl)) {}
+ Status setupSessionsCollection(OperationContext* opCtx) override {
+ return Status::OK();
+ }
+
Status refreshSessions(OperationContext* opCtx,
const LogicalSessionRecordSet& sessions) override {
return _impl->refreshSessions(sessions);
diff --git a/src/mongo/db/sessions_collection_rs.cpp b/src/mongo/db/sessions_collection_rs.cpp
index 4cfbffff3c3..cc65d9df574 100644
--- a/src/mongo/db/sessions_collection_rs.cpp
+++ b/src/mongo/db/sessions_collection_rs.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/repl_set_config.h"
#include "mongo/db/repl/replication_coordinator.h"
+#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/stdx/memory.h"
namespace mongo {
@@ -131,6 +132,28 @@ auto dispatch(const NamespaceString& ns,
} // namespace
+Status SessionsCollectionRS::setupSessionsCollection(OperationContext* opCtx) {
+ return dispatch(kSessionsNamespaceString,
+ MODE_IX,
+ opCtx,
+ [&] {
+ // Creating the TTL index will auto-generate the collection.
+ DBDirectClient client(opCtx);
+ BSONObj info;
+ auto cmd = generateCreateIndexesCmd();
+ if (!client.runCommand(kSessionsDb.toString(), cmd, info)) {
+ return getStatusFromCommandResult(info);
+ }
+
+ return Status::OK();
+ },
+ [&](DBClientBase*) {
+ // If we are not the primary, we aren't going to do writes
+ // anyway, so just return ok.
+ return Status::OK();
+ });
+}
+
Status SessionsCollectionRS::refreshSessions(OperationContext* opCtx,
const LogicalSessionRecordSet& sessions) {
return dispatch(
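On a replica set, setup reduces to a single createIndexes call on the primary; creating the TTL index implicitly creates the collection, and secondaries return OK without writing. A sketch of the equivalent shell call:

    // Run against the replica set primary only.
    var res = primary.getDB("config").runCommand({
        createIndexes: "system.sessions",
        indexes: [{key: {lastUse: 1}, name: "lsidTTLIndex", expireAfterSeconds: 1800}]
    });
    assert.commandWorked(res);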
diff --git a/src/mongo/db/sessions_collection_rs.h b/src/mongo/db/sessions_collection_rs.h
index 08459d6ff4f..88645bed5ad 100644
--- a/src/mongo/db/sessions_collection_rs.h
+++ b/src/mongo/db/sessions_collection_rs.h
@@ -54,6 +54,11 @@ public:
SessionsCollectionRS() = default;
/**
+ * Ensures that the sessions collection exists and has the proper indexes.
+ */
+ Status setupSessionsCollection(OperationContext* opCtx) override;
+
+ /**
* Updates the last-use times on the given sessions to be greater than
* or equal to the current time.
*/
diff --git a/src/mongo/db/sessions_collection_sharded.cpp b/src/mongo/db/sessions_collection_sharded.cpp
index bbcf195d8dd..de7699eb962 100644
--- a/src/mongo/db/sessions_collection_sharded.cpp
+++ b/src/mongo/db/sessions_collection_sharded.cpp
@@ -35,7 +35,10 @@
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/query_request.h"
#include "mongo/db/sessions_collection_rs.h"
+#include "mongo/rpc/get_status_from_command_result.h"
+#include "mongo/s/catalog_cache.h"
#include "mongo/s/commands/cluster_write.h"
+#include "mongo/s/grid.h"
#include "mongo/s/query/cluster_find.h"
#include "mongo/s/write_ops/batch_write_exec.h"
#include "mongo/s/write_ops/batched_command_request.h"
@@ -52,6 +55,26 @@ BSONObj lsidQuery(const LogicalSessionId& lsid) {
} // namespace
+Status SessionsCollectionSharded::_checkCacheForSessionsCollection(OperationContext* opCtx) {
+ // If the collection doesn't exist, fail. Only the config servers generate it.
+ auto res = Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(
+ opCtx, NamespaceString(SessionsCollection::kSessionsFullNS.toString()));
+ if (!res.isOK()) {
+ return res.getStatus();
+ }
+
+ auto routingInfo = res.getValue();
+ if (routingInfo.cm()) {
+ return Status::OK();
+ }
+
+ return {ErrorCodes::NamespaceNotFound, "config.system.sessions is not yet sharded"};
+}
+
+Status SessionsCollectionSharded::setupSessionsCollection(OperationContext* opCtx) {
+ return _checkCacheForSessionsCollection(opCtx);
+}
+
Status SessionsCollectionSharded::refreshSessions(OperationContext* opCtx,
const LogicalSessionRecordSet& sessions) {
auto send = [&](BSONObj toSend) {
diff --git a/src/mongo/db/sessions_collection_sharded.h b/src/mongo/db/sessions_collection_sharded.h
index 01ac730e09a..d8f5c6cf3f2 100644
--- a/src/mongo/db/sessions_collection_sharded.h
+++ b/src/mongo/db/sessions_collection_sharded.h
@@ -44,6 +44,12 @@ class OperationContext;
class SessionsCollectionSharded : public SessionsCollection {
public:
/**
+ * Ensures that the sessions collection exists, is sharded,
+ * and has the proper indexes.
+ */
+ Status setupSessionsCollection(OperationContext* opCtx) override;
+
+ /**
* Updates the last-use times on the given sessions to be greater than
* or equal to the current time.
*/
@@ -60,6 +66,9 @@ public:
Status removeTransactionRecords(OperationContext* opCtx,
const LogicalSessionIdSet& sessions) override;
+
+protected:
+ Status _checkCacheForSessionsCollection(OperationContext* opCtx);
};
} // namespace mongo
diff --git a/src/mongo/db/sessions_collection_standalone.cpp b/src/mongo/db/sessions_collection_standalone.cpp
index 84c2b4daae8..8fcc2acfbee 100644
--- a/src/mongo/db/sessions_collection_standalone.cpp
+++ b/src/mongo/db/sessions_collection_standalone.cpp
@@ -34,6 +34,7 @@
#include "mongo/client/query.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/operation_context.h"
+#include "mongo/rpc/get_status_from_command_result.h"
namespace mongo {
@@ -44,6 +45,17 @@ BSONObj lsidQuery(const LogicalSessionId& lsid) {
}
} // namespace
+Status SessionsCollectionStandalone::setupSessionsCollection(OperationContext* opCtx) {
+ DBDirectClient client(opCtx);
+ auto cmd = generateCreateIndexesCmd();
+ BSONObj info;
+ if (!client.runCommand(kSessionsDb.toString(), cmd, info)) {
+ return getStatusFromCommandResult(info);
+ }
+
+ return Status::OK();
+}
+
Status SessionsCollectionStandalone::refreshSessions(OperationContext* opCtx,
const LogicalSessionRecordSet& sessions) {
DBDirectClient client(opCtx);
diff --git a/src/mongo/db/sessions_collection_standalone.h b/src/mongo/db/sessions_collection_standalone.h
index ad5f155dd64..9f12516f2ff 100644
--- a/src/mongo/db/sessions_collection_standalone.h
+++ b/src/mongo/db/sessions_collection_standalone.h
@@ -43,6 +43,11 @@ class OperationContext;
class SessionsCollectionStandalone : public SessionsCollection {
public:
/**
+ * Ensures that the sessions collection exists and has the proper indexes.
+ */
+ Status setupSessionsCollection(OperationContext* opCtx) override;
+
+ /**
* Updates the last-use times on the given sessions to be greater than
* or equal to the current time.
*/
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index be95f1d0758..ccc6ccab06c 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -47,7 +47,6 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/logical_session_cache.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
@@ -59,13 +58,10 @@ namespace {
BSONObj v1SystemUsersKeyPattern;
BSONObj v3SystemUsersKeyPattern;
BSONObj v3SystemRolesKeyPattern;
-BSONObj v1SystemSessionsKeyPattern;
std::string v3SystemUsersIndexName;
std::string v3SystemRolesIndexName;
-std::string v1SystemSessionsIndexName;
IndexSpec v3SystemUsersIndexSpec;
IndexSpec v3SystemRolesIndexSpec;
-IndexSpec v1SystemSessionsIndexSpec;
const NamespaceString sessionCollectionNamespace("config.system.sessions");
@@ -77,7 +73,6 @@ MONGO_INITIALIZER(AuthIndexKeyPatterns)(InitializerContext*) {
v3SystemRolesKeyPattern = BSON(
AuthorizationManager::ROLE_NAME_FIELD_NAME << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME
<< 1);
- v1SystemSessionsKeyPattern = BSON("lastUse" << 1);
v3SystemUsersIndexName =
std::string(str::stream() << AuthorizationManager::USER_NAME_FIELD_NAME << "_1_"
<< AuthorizationManager::USER_DB_FIELD_NAME
@@ -86,7 +81,6 @@ MONGO_INITIALIZER(AuthIndexKeyPatterns)(InitializerContext*) {
std::string(str::stream() << AuthorizationManager::ROLE_NAME_FIELD_NAME << "_1_"
<< AuthorizationManager::ROLE_DB_FIELD_NAME
<< "_1");
- v1SystemSessionsIndexName = "lastUse_1";
v3SystemUsersIndexSpec.addKeys(v3SystemUsersKeyPattern);
v3SystemUsersIndexSpec.unique();
@@ -96,11 +90,6 @@ MONGO_INITIALIZER(AuthIndexKeyPatterns)(InitializerContext*) {
v3SystemRolesIndexSpec.unique();
v3SystemRolesIndexSpec.name(v3SystemRolesIndexName);
- v1SystemSessionsIndexSpec.addKeys(v1SystemSessionsKeyPattern);
- v1SystemSessionsIndexSpec.expireAfterSeconds(
- durationCount<Seconds>(Minutes(localLogicalSessionTimeoutMinutes)));
- v1SystemSessionsIndexSpec.name(v1SystemSessionsIndexName);
-
return Status::OK();
}
@@ -200,33 +189,6 @@ Status verifySystemIndexes(OperationContext* opCtx) {
}
}
- // Create indexes for system collections in the config db.
- {
- AutoGetDb autoDb(opCtx, sessionCollectionNamespace.db(), MODE_X);
- if (!autoDb.getDb()) {
- return Status::OK();
- }
-
- // Ensure that system indexes exist for the sessions collection, if it exists.
- auto collection = autoDb.getDb()->getCollection(opCtx, sessionCollectionNamespace);
- if (collection) {
- IndexCatalog* indexCatalog = collection->getIndexCatalog();
- invariant(indexCatalog);
-
- std::vector<IndexDescriptor*> indexes;
- indexCatalog->findIndexesByKeyPattern(
- opCtx, v1SystemSessionsKeyPattern, false, &indexes);
- if (indexes.empty()) {
- try {
- generateSystemIndexForExistingCollection(
- opCtx, collection, sessionCollectionNamespace, v1SystemSessionsIndexSpec);
- } catch (...) {
- return exceptionToStatus();
- }
- }
- }
- }
-
return Status::OK();
}
@@ -249,14 +211,6 @@ void createSystemIndexes(OperationContext* opCtx, Collection* collection) {
fassertStatusOK(
40458, collection->getIndexCatalog()->createIndexOnEmptyCollection(opCtx, indexSpec));
- } else if (ns == sessionCollectionNamespace) {
- auto indexSpec = fassertStatusOK(
- 40493,
- index_key_validate::validateIndexSpec(
- v1SystemSessionsIndexSpec.toBSON(), ns, serverGlobalParams.featureCompatibility));
-
- fassertStatusOK(
- 40494, collection->getIndexCatalog()->createIndexOnEmptyCollection(opCtx, indexSpec));
}
}
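
Taken together, these removals retire the v1 sessions TTL machinery from system_index.cpp: the config.system.sessions index is no longer created lazily by verifySystemIndexes() or createSystemIndexes(), but explicitly by each SessionsCollection::setupSessionsCollection() implementation. For reference, the deleted spec, reconstructed from the removed lines above:

    // A TTL index on lastUse whose expiry tracks localLogicalSessionTimeoutMinutes.
    IndexSpec v1SystemSessionsIndexSpec;
    v1SystemSessionsIndexSpec.addKeys(BSON("lastUse" << 1));
    v1SystemSessionsIndexSpec.expireAfterSeconds(
        durationCount<Seconds>(Minutes(localLogicalSessionTimeoutMinutes)));
    v1SystemSessionsIndexSpec.name("lastUse_1");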
diff --git a/src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp b/src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp
index 4c2e0e6e61b..5ce39eba1bf 100644
--- a/src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp
@@ -120,6 +120,19 @@ protected:
});
}
+ void expectCollectionDrop(const HostAndPort& target, const NamespaceString& nss) {
+ onCommandForAddShard([&](const RemoteCommandRequest& request) {
+ ASSERT_EQ(request.target, target);
+ ASSERT_EQ(request.dbname, nss.db());
+ ASSERT_BSONOBJ_EQ(request.cmdObj,
+ BSON("drop" << nss.coll() << "writeConcern" << BSON("w"
+ << "majority")));
+ ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
+
+ return BSON("ok" << 1);
+ });
+ }
+
void expectSetFeatureCompatibilityVersion(const HostAndPort& target,
StatusWith<BSONObj> response) {
onCommandForAddShard([&, target, response](const RemoteCommandRequest& request) {
@@ -401,6 +414,8 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) {
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
+ expectCollectionDrop(shardTarget, NamespaceString("config", "system.sessions"));
+
// The shardIdentity doc inserted into the admin.system.version collection on the shard.
expectShardIdentityUpsertReturnSuccess(shardTarget, expectedShardName);
@@ -484,6 +499,8 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
+ expectCollectionDrop(shardTarget, NamespaceString("config", "system.sessions"));
+
// The shardIdentity doc inserted into the admin.system.version collection on the shard.
expectShardIdentityUpsertReturnSuccess(shardTarget, expectedShardName);
@@ -909,6 +926,8 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
// Get databases list from new shard
expectListDatabases(shardTarget, std::vector<BSONObj>{BSON("name" << discoveredDB.getName())});
+ expectCollectionDrop(shardTarget, NamespaceString("config", "system.sessions"));
+
// The shardIdentity doc inserted into the admin.system.version collection on the shard.
expectShardIdentityUpsertReturnSuccess(shardTarget, expectedShardName);
@@ -974,6 +993,8 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
// Get databases list from new shard
expectListDatabases(shardTarget, std::vector<BSONObj>{BSON("name" << discoveredDB.getName())});
+ expectCollectionDrop(shardTarget, NamespaceString("config", "system.sessions"));
+
// The shardIdentity doc inserted into the admin.system.version collection on the shard.
expectShardIdentityUpsertReturnSuccess(shardTarget, expectedShardName);
@@ -1055,6 +1076,8 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
BSON("name" << discoveredDB1.getName() << "sizeOnDisk" << 2000),
BSON("name" << discoveredDB2.getName() << "sizeOnDisk" << 5000)});
+ expectCollectionDrop(shardTarget, NamespaceString("config", "system.sessions"));
+
// The shardIdentity doc inserted into the admin.system.version collection on the shard.
expectShardIdentityUpsertReturnSuccess(shardTarget, expectedShardName);
diff --git a/src/mongo/s/catalog/sharding_catalog_manager.h b/src/mongo/s/catalog/sharding_catalog_manager.h
index e49fd77f727..ae5d181f58c 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager.h
+++ b/src/mongo/s/catalog/sharding_catalog_manager.h
@@ -381,6 +381,12 @@ private:
const ConnectionString& connectionString);
/**
+ * Drops the sessions collection on the specified host.
+ */
+ Status _dropSessionsCollection(OperationContext* opCtx,
+ std::shared_ptr<RemoteCommandTargeter> targeter);
+
+ /**
* Runs the listDatabases command on the specified host and returns the names of all databases
* it returns excluding those named local, config and admin, since they serve administrative
* purposes.
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp
index e1ebbee383a..6aaa1b4f8bc 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations.cpp
@@ -52,6 +52,7 @@
#include "mongo/db/repl/repl_set_config.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/type_shard_identity.h"
+#include "mongo/db/sessions_collection.h"
#include "mongo/db/wire_version.h"
#include "mongo/executor/task_executor.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -468,6 +469,30 @@ StatusWith<ShardType> ShardingCatalogManager::_validateHostAsShard(
return shard;
}
+Status ShardingCatalogManager::_dropSessionsCollection(
+ OperationContext* opCtx, std::shared_ptr<RemoteCommandTargeter> targeter) {
+
+ BSONObjBuilder builder;
+ builder.append("drop", SessionsCollection::kSessionsCollection.toString());
+ {
+ BSONObjBuilder wcBuilder(builder.subobjStart("writeConcern"));
+ wcBuilder.append("w", "majority");
+ }
+
+ auto swCommandResponse = _runCommandForAddShard(
+ opCtx, targeter.get(), SessionsCollection::kSessionsDb.toString(), builder.done());
+ if (!swCommandResponse.isOK()) {
+ return swCommandResponse.getStatus();
+ }
+
+ auto cmdStatus = std::move(swCommandResponse.getValue().commandStatus);
+ if (!cmdStatus.isOK() && cmdStatus.code() != ErrorCodes::NamespaceNotFound) {
+ return cmdStatus;
+ }
+
+ return Status::OK();
+}
+
StatusWith<std::vector<std::string>> ShardingCatalogManager::_getDBNamesListFromShard(
OperationContext* opCtx, std::shared_ptr<RemoteCommandTargeter> targeter) {
@@ -592,6 +617,18 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
}
}
+ // Check that the shard candidate does not have a local config.system.sessions collection
+ auto res = _dropSessionsCollection(opCtx, targeter);
+
+ if (!res.isOK()) {
+ return Status(
+ res.code(),
+ str::stream()
+ << "can't add shard with a local copy of config.system.sessions due to "
+ << res.reason()
+ << ", please drop this collection from the shard manually and try again.");
+ }
+
// If a name for a shard wasn't provided, generate one
if (shardType.getName().empty()) {
auto result = generateNewShardName(opCtx);
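
Note the deliberate asymmetry in _dropSessionsCollection above: a transport or write-concern failure aborts addShard, but NamespaceNotFound is swallowed, so a shard that never had a local config.system.sessions passes through untouched. The pattern isolated as a sketch (the helper name is illustrative, not part of this patch):

    // Treat "namespace not found" as success so addShard stays idempotent
    // for shards that never created a local sessions collection.
    Status ignoreNamespaceNotFound(Status dropStatus) {
        if (dropStatus.code() == ErrorCodes::NamespaceNotFound) {
            return Status::OK();
        }
        return dropStatus;
    }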
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index a64eace6ba4..82c5471def1 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -170,6 +170,12 @@ void ShardRegistry::getAllShardIds(vector<ShardId>* all) const {
all->assign(seen.begin(), seen.end());
}
+int ShardRegistry::getNumShards() const {
+ std::set<ShardId> seen;
+ _data.getAllShardIds(seen);
+ return seen.size();
+}
+
void ShardRegistry::toBSON(BSONObjBuilder* result) const {
_data.toBSON(result);
}
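
getNumShards() lets callers cheaply ask whether the cluster has any shards at all, without building the id vector that getAllShardIds() fills for them. A plausible consumer is the new SessionsCollectionConfigServer (see the file list above): the config server cannot shard config.system.sessions into an empty cluster. A sketch under that assumption; the method name and message are guesses, not code from this patch:

    Status SessionsCollectionConfigServer::_shardCollectionIfNeeded(OperationContext* opCtx) {
        // Refuse to set up the sessions collection before any shard exists.
        if (Grid::get(opCtx)->shardRegistry()->getNumShards() == 0) {
            return {ErrorCodes::ShardNotFound,
                    "cannot create config.system.sessions until there are shards"};
        }
        // ... shard the collection and create the TTL index ...
        return Status::OK();
    }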
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index 13b48b95ce4..af1f11e8f06 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -233,6 +233,8 @@ public:
std::shared_ptr<Shard> lookupRSName(const std::string& name) const;
void getAllShardIds(std::vector<ShardId>* all) const;
+ int getNumShards() const;
+
void toBSON(BSONObjBuilder* result) const;
bool isUp() const;
diff --git a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
index 4e5cea30cd2..b441f68c4fa 100644
--- a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
@@ -31,59 +31,48 @@
#include "mongo/platform/basic.h"
#include <string>
-#include <vector>
-#include "mongo/client/connpool.h"
#include "mongo/db/commands.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/s/catalog/sharding_catalog_client.h"
-#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/commands/cluster_commands_helpers.h"
#include "mongo/s/grid.h"
#include "mongo/util/log.h"
namespace mongo {
-
-using std::string;
-using std::vector;
-
namespace {
class RemoveShardCmd : public BasicCommand {
public:
RemoveShardCmd() : BasicCommand("removeShard", "removeshard") {}
- virtual bool slaveOk() const {
- return false;
+ void help(std::stringstream& help) const override {
+ help << "remove a shard from the system.";
}
- virtual bool adminOnly() const {
- return true;
+ bool slaveOk() const override {
+ return false;
}
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
+ bool adminOnly() const override {
return true;
}
- virtual void help(std::stringstream& help) const {
- help << "remove a shard from the system.";
+ bool supportsWriteConcern(const BSONObj& cmd) const override {
+ return true;
}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
+ void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) override {
ActionSet actions;
actions.addAction(ActionType::removeShard);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) {
-
+ bool run(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) override {
uassert(ErrorCodes::TypeMismatch,
str::stream() << "Field '" << cmdObj.firstElement().fieldName()
<< "' must be of type string",
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index 5ee8132e2c9..cc46c16b7c5 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -1506,6 +1506,20 @@ var ShardingTest = function(params) {
return true;
}, "waiting for all mongos servers to return cluster times", 60 * 1000, 500);
}
+
+ // Ensure that the sessions collection exists so jstests can run things with
+ // logical sessions and test them. We do this by forcing an immediate cache refresh
+ // on the config server, which auto-shards the collection for the cluster.
+ var lastStableBinVersion = MongoRunner.getBinVersionFor('last-stable');
+ if ((!otherParams.configOptions) ||
+ (otherParams.configOptions && !otherParams.configOptions.binVersion) ||
+ (otherParams.configOptions && otherParams.configOptions.binVersion &&
+ MongoRunner.areBinVersionsTheSame(
+ lastStableBinVersion,
+ MongoRunner.getBinVersionFor(otherParams.configOptions.binVersion)))) {
+ this.configRS.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
+ }
+
};
// Stub for a hook to check that collection UUIDs are consistent across shards and the config
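
Server-side, the refreshLogicalSessionCacheNow test command plausibly reduces to one synchronous refresh of the logical session cache (a sketch; the exact signature is hedged). On a config server that refresh invokes setupSessionsCollection, which is what auto-shards config.system.sessions for the whole cluster:

    // Inside the command's run(): force one refresh instead of waiting for
    // the periodic job. On a config server this drives setupSessionsCollection.
    auto cache = LogicalSessionCache::get(opCtx);
    uassertStatusOK(cache->refreshNow(opCtx->getClient()));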