author    Jason Carey <jcarey@argv.me>  2018-02-07 15:21:47 -0500
committer Jason Carey <jcarey@argv.me>  2018-02-08 15:18:11 -0500
commit    4ae174dd53adaea999715ffbe19c435d685bc412 (patch)
tree      c61751faa5ec48b96b96772abad14afc480c2f42 /jstests/auth/refresh_logical_session_cache_with_long_usernames.js
parent    d337da259248c785f4014b565742300eb08ecd4f (diff)
SERVER-33158 Shrink LogicalSession refresh batches
The batches created by the LogicalSessionCache can exceed the 16MB BSON size limit for messages on the wire. This causes the refresh step to fail, preventing logical sessions from ever being synced to the global collection.

This happens because we do not explicitly size our batches in bytes; we were relying on the write_cmd item batch limit rather than a byte limit. Previously the write_cmd batch limit was 1,000 items, which allowed roughly 16KB per record. The new limit is 100,000 items, which leaves only about 160 bytes per record, a budget that very large usernames can exceed (each record syncs the lsid plus the user@db name).

By forcing a new 10KB limit on the size of usernames used with logical sessions, we can ensure that the lower 1,000-item batch limit is always safe: 1,000 records of at most ~10KB each come to roughly 10MB, comfortably under the 16MB cap.
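As a back-of-the-envelope illustration of that arithmetic (a sketch only; the 48-byte lsid overhead is an assumed figure and worstCaseBatchBytes is a hypothetical helper, not server code):

    // Sketch of the sizing math from the commit message. Assumes each
    // refresh record is roughly an lsid plus the "user@db" string.
    const WIRE_LIMIT = 16 * 1024 * 1024;  // 16MB BSON wire limit

    function worstCaseBatchBytes(items, usernameBytes) {
        const lsidOverhead = 48;  // assumed bytes for the UUID + BSON field overhead
        return items * (lsidOverhead + usernameBytes);
    }

    // Old scheme: 100k-item batches leave ~160 bytes per record, so a 1KB
    // username already pushes a full batch past the wire limit.
    print(worstCaseBatchBytes(100 * 1000, 1000) > WIRE_LIMIT);   // true
    // New scheme: 1k-item batches of 10KB-capped usernames stay near 10MB.
    print(worstCaseBatchBytes(1000, 10 * 1024) <= WIRE_LIMIT);   // true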
Diffstat (limited to 'jstests/auth/refresh_logical_session_cache_with_long_usernames.js')
-rw-r--r-- jstests/auth/refresh_logical_session_cache_with_long_usernames.js | 43
1 file changed, 43 insertions(+), 0 deletions(-)
diff --git a/jstests/auth/refresh_logical_session_cache_with_long_usernames.js b/jstests/auth/refresh_logical_session_cache_with_long_usernames.js
new file mode 100644
index 00000000000..d9bbed5324f
--- /dev/null
+++ b/jstests/auth/refresh_logical_session_cache_with_long_usernames.js
@@ -0,0 +1,43 @@
+// Verifies that we've fixed SERVER-33158 by creating large user lsid refresh records (via large
+// usernames)
+
+(function() {
+ 'use strict';
+
+ const mongod = MongoRunner.runMongod({auth: ""});
+
+ const refresh = {refreshLogicalSessionCacheNow: 1};
+ const startSession = {startSession: 1};
+
+ const admin = mongod.getDB('admin');
+ const db = mongod.getDB("test");
+ const config = mongod.getDB("config");
+
+ admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(admin.auth('admin', 'pass'));
+
+ const longUserName = "x".repeat(1000);
+
+ // Create a user with a long name, so that the refresh records have a chance to blow out the
+ // 16MB limit, if all the sessions are flushed in one batch
+ db.createUser({user: longUserName, pwd: 'pass', roles: jsTest.basicUserRoles});
+ admin.logout();
+
+ assert(db.auth(longUserName, 'pass'));
+
+ // 20k sessions * ~1k bytes per username = ~20mb of records, greater than the 16mb limit
+ const numSessions = 20000;
+ for (var i = 0; i < numSessions; i++) {
+ assert.commandWorked(admin.runCommand(startSession), "unable to start session");
+ }
+
+ assert.commandWorked(admin.runCommand(refresh), "failed to refresh");
+
+ // Make sure we actually flushed the sessions
+ assert.eq(numSessions,
+ config.system.sessions.aggregate([{'$listSessions': {}}, {'$count': "count"}])
+ .next()
+ .count);
+
+ MongoRunner.stopMongod(mongod);
+})();
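For context, the fix described in the commit message amounts to byte-aware batching: cut a batch when either the item count or the accumulated byte size would exceed its budget. A minimal shell-JavaScript sketch of that idea (illustrative only; makeBatches and the limits passed to it are assumptions, not the server's C++ implementation):

    // Split records into batches bounded by both an item count and a byte
    // budget, starting a new batch before either limit would be exceeded.
    function makeBatches(records, maxItems, maxBytes) {
        const batches = [];
        let batch = [];
        let batchBytes = 0;
        records.forEach(function(rec) {
            const recBytes = Object.bsonsize(rec);  // mongo shell BSON size helper
            if (batch.length > 0 &&
                (batch.length >= maxItems || batchBytes + recBytes > maxBytes)) {
                batches.push(batch);
                batch = [];
                batchBytes = 0;
            }
            batch.push(rec);
            batchBytes += recBytes;
        });
        if (batch.length > 0) {
            batches.push(batch);
        }
        return batches;
    }

For instance, makeBatches(sessionRecords, 1000, 10 * 1024 * 1024) would mirror the 1,000-item, ~10MB budget the commit message implies.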