summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJason Carey <jcarey@argv.me>2018-02-07 15:21:47 -0500
committerJason Carey <jcarey@argv.me>2018-02-08 18:02:11 -0500
commite872183bb267ab7202bad421523f9ec96e12d933 (patch)
tree57e43fbfd34ea07fa6d33ddadbad54d386af2ca9
parentf639bbb16ace7598865a889211ba89b8c5ccf6fe (diff)
downloadmongo-e872183bb267ab7202bad421523f9ec96e12d933.tar.gz
SERVER-33158 Shrink LogicalSession refresh batches
The batches created by the LogicalSessionCache can exceed the 16MB BSON size limit on the wire. This causes the refresh step to fail, preventing logical sessions from ever being synced to the global collection. It happens because we don't explicitly size our batches in bytes (we were relying on the write_cmd per-batch item limit rather than a byte limit). Previously the write_cmd batch limit was 1000 items, which allowed 16KB per record. The new limit is 100,000 items, which leaves only a 160-byte budget per record — a budget we can exceed with very large user names (since we sync the lsid plus the user@db name). By enforcing a new 10KB limit on usernames used with logical sessions, we can then ensure that a lower batch limit of 1000 items will always be safe. (cherry picked from commit 4ae174dd53adaea999715ffbe19c435d685bc412)
-rw-r--r--jstests/auth/refresh_logical_session_cache_with_long_usernames.js43
-rw-r--r--src/mongo/db/logical_session_id_helpers.cpp4
-rw-r--r--src/mongo/db/logical_session_id_helpers.h2
-rw-r--r--src/mongo/db/logical_session_id_test.cpp11
-rw-r--r--src/mongo/db/sessions_collection.cpp10
5 files changed, 69 insertions, 1 deletion
diff --git a/jstests/auth/refresh_logical_session_cache_with_long_usernames.js b/jstests/auth/refresh_logical_session_cache_with_long_usernames.js
new file mode 100644
index 00000000000..d9bbed5324f
--- /dev/null
+++ b/jstests/auth/refresh_logical_session_cache_with_long_usernames.js
@@ -0,0 +1,43 @@
+// Verifies that we've fixed SERVER-33158 by creating large user lsid refresh records (via large
+// usernames)
+
+(function() {
+ 'use strict';
+
+ const mongod = MongoRunner.runMongod({auth: ""});
+
+ const refresh = {refreshLogicalSessionCacheNow: 1};
+ const startSession = {startSession: 1};
+
+ const admin = mongod.getDB('admin');
+ const db = mongod.getDB("test");
+ const config = mongod.getDB("config");
+
+ admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(admin.auth('admin', 'pass'));
+
+ const longUserName = "x".repeat(1000);
+
+ // Create a user with a long name, so that the refresh records have a chance to blow out the
+ // 16MB limit, if all the sessions are flushed in one batch
+ db.createUser({user: longUserName, pwd: 'pass', roles: jsTest.basicUserRoles});
+ admin.logout();
+
+ assert(db.auth(longUserName, 'pass'));
+
+ // 20k * 1k = 20mb which is greater than 16mb
+ const numSessions = 20000;
+ for (var i = 0; i < numSessions; i++) {
+ assert.commandWorked(admin.runCommand(startSession), "unable to start session");
+ }
+
+ assert.commandWorked(admin.runCommand(refresh), "failed to refresh");
+
+ // Make sure we actually flushed the sessions
+ assert.eq(numSessions,
+ config.system.sessions.aggregate([{'$listSessions': {}}, {'$count': "count"}])
+ .next()
+ .count);
+
+ MongoRunner.stopMongod(mongod);
+})();
diff --git a/src/mongo/db/logical_session_id_helpers.cpp b/src/mongo/db/logical_session_id_helpers.cpp
index 7f89ca518df..32f743d0e42 100644
--- a/src/mongo/db/logical_session_id_helpers.cpp
+++ b/src/mongo/db/logical_session_id_helpers.cpp
@@ -55,6 +55,10 @@ SHA256Block getLogicalSessionUserDigestForLoggedInUser(const OperationContext* o
const auto user = AuthorizationSession::get(client)->getSingleUser();
invariant(user);
+ uassert(ErrorCodes::BadValue,
+ "Username too long to use with logical sessions",
+ user->getName().getFullName().length() < kMaximumUserNameLengthForLogicalSessions);
+
return user->getDigest();
} else {
return kNoAuthDigest;
diff --git a/src/mongo/db/logical_session_id_helpers.h b/src/mongo/db/logical_session_id_helpers.h
index 9735706330b..7e08503432b 100644
--- a/src/mongo/db/logical_session_id_helpers.h
+++ b/src/mongo/db/logical_session_id_helpers.h
@@ -36,6 +36,8 @@
namespace mongo {
+constexpr size_t kMaximumUserNameLengthForLogicalSessions = 10000;
+
/**
* Get the currently logged in user's UID digest.
*/
diff --git a/src/mongo/db/logical_session_id_test.cpp b/src/mongo/db/logical_session_id_test.cpp
index 8f7e96943bb..26302a53df1 100644
--- a/src/mongo/db/logical_session_id_test.cpp
+++ b/src/mongo/db/logical_session_id_test.cpp
@@ -335,5 +335,16 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SupportsDocLockingFa
ErrorCodes::IllegalOperation);
}
+TEST_F(LogicalSessionIdTest, ConstructorFromClientWithTooLongName) {
+ auto id = UUID::gen();
+
+ addSimpleUser(UserName(std::string(kMaximumUserNameLengthForLogicalSessions + 1, 'x'), "test"));
+
+ LogicalSessionFromClient req;
+ req.setId(id);
+
+ ASSERT_THROWS(makeLogicalSessionId(req, _opCtx.get()), AssertionException);
+}
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/sessions_collection.cpp b/src/mongo/db/sessions_collection.cpp
index ec0d2f43a8b..49644651e23 100644
--- a/src/mongo/db/sessions_collection.cpp
+++ b/src/mongo/db/sessions_collection.cpp
@@ -48,6 +48,14 @@ namespace mongo {
namespace {
+// This batch size is chosen to ensure that we don't form requests larger than the 16mb limit.
+// Especially for refreshes, the updates we send include the full user name (user@db), and user
+// names can be quite large (we enforce a max 10k limit for usernames used with sessions).
+//
+// At 1000 elements, a 16mb payload gives us a budget of 16000 bytes per user, which we should
+// comfortably be able to stay under, even with 10k user names.
+constexpr size_t kMaxBatchSize = 1000;
+
BSONObj lsidQuery(const LogicalSessionId& lsid) {
return BSON(LogicalSessionRecord::kIdFieldName << lsid.toBSON());
}
@@ -94,7 +102,7 @@ Status runBulkGeneric(TFactory makeT, AddLineFn addLine, SendFn sendBatch, const
for (const auto& item : items) {
addLine(*thing, item);
- if (++i >= write_ops::kMaxWriteBatchSize) {
+ if (++i >= kMaxBatchSize) {
auto res = sendLocalBatch();
if (!res.isOK()) {
return res;