author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2021-09-03 11:46:56 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2021-09-03 11:46:56 +0200
commit     e15fbe1c7f108a4ad49d69a931d6166aa401cb9a (patch)
tree       5f6cfcd325f21a57da2ba4b7ed33bce82c5892ba /patches/0004-futex-Avoid-redundant-task-lookup.patch
parent     da9e10bf0b05a02d1ef4447e677eba697af32ec9 (diff)
download   linux-rt-e15fbe1c7f108a4ad49d69a931d6166aa401cb9a.tar.gz
[ANNOUNCE] v5.14-rt17 (v5.14-rt17-patches)
Dear RT folks!

I'm pleased to announce the v5.14-rt17 patch set.

Changes since v5.14-rt16:

  - Teach lockdep to recognize read-locks on configurations without QRW locks.

  - Various futex patches addressing fallout reported by syzbot and caused by the
    locking rework. Patches by Thomas Gleixner.

Known issues:

  - netconsole triggers a WARN.

  - The "Memory controller" (CONFIG_MEMCG) has been disabled.

  - An RCU and ARM64 warning has been fixed by Valentin Schneider. It is still not
    clear if the RCU-related change is correct.

  - Clark Williams reported issues in i915 (execlists_dequeue_irq()).

  - Valentin Schneider reported a few splats on ARM64, see
    https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com/

The delta patch against v5.14-rt16 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/incr/patch-5.14-rt16-rt17.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.14-rt17

The RT patch against v5.14 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patch-5.14-rt17.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patches-5.14-rt17.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Diffstat (limited to 'patches/0004-futex-Avoid-redundant-task-lookup.patch')
-rw-r--r--  patches/0004-futex-Avoid-redundant-task-lookup.patch  122
1 file changed, 122 insertions(+), 0 deletions(-)
diff --git a/patches/0004-futex-Avoid-redundant-task-lookup.patch b/patches/0004-futex-Avoid-redundant-task-lookup.patch
new file mode 100644
index 000000000000..ae99fce2404d
--- /dev/null
+++ b/patches/0004-futex-Avoid-redundant-task-lookup.patch
@@ -0,0 +1,122 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 2 Sep 2021 11:48:51 +0200
+Subject: [PATCH 4/4] futex: Avoid redundant task lookup
+
+No need to do the full VPID-based task lookup and validation of the top
+waiter when the user space futex was acquired on its behalf during the
+requeue_pi operation. The task is known already and it cannot go away
+before requeue_pi_wake_futex() has been invoked.
+
+Split out the actual attach code from attach_to_pi_owner() and use that
+instead of the full-blown variant.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20210902094414.676104881@linutronix.de
+---
+ kernel/futex.c | 67 +++++++++++++++++++++++++++++++--------------------------
+ 1 file changed, 37 insertions(+), 30 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1263,6 +1263,36 @@ static int handle_exit_race(u32 __user *
+ return -ESRCH;
+ }
+
++static void __attach_to_pi_owner(struct task_struct *p, union futex_key *key,
++ struct futex_pi_state **ps)
++{
++ /*
++ * No existing pi state. First waiter. [2]
++ *
++ * This creates pi_state, we have hb->lock held, this means nothing can
++ * observe this state, wait_lock is irrelevant.
++ */
++ struct futex_pi_state *pi_state = alloc_pi_state();
++
++ /*
++ * Initialize the pi_mutex in locked state and make @p
++ * the owner of it:
++ */
++ rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
++
++ /* Store the key for possible exit cleanups: */
++ pi_state->key = *key;
++
++ WARN_ON(!list_empty(&pi_state->list));
++ list_add(&pi_state->list, &p->pi_state_list);
++ /*
++ * Assignment without holding pi_state->pi_mutex.wait_lock is safe
++ * because there is no concurrency as the object is not published yet.
++ */
++ pi_state->owner = p;
++
++ *ps = pi_state;
++}
+ /*
+ * Lookup the task for the TID provided from user space and attach to
+ * it after doing proper sanity checks.
+@@ -1272,7 +1302,6 @@ static int attach_to_pi_owner(u32 __user
+ struct task_struct **exiting)
+ {
+ pid_t pid = uval & FUTEX_TID_MASK;
+- struct futex_pi_state *pi_state;
+ struct task_struct *p;
+
+ /*
+@@ -1324,36 +1353,11 @@ static int attach_to_pi_owner(u32 __user
+ return ret;
+ }
+
+- /*
+- * No existing pi state. First waiter. [2]
+- *
+- * This creates pi_state, we have hb->lock held, this means nothing can
+- * observe this state, wait_lock is irrelevant.
+- */
+- pi_state = alloc_pi_state();
+-
+- /*
+- * Initialize the pi_mutex in locked state and make @p
+- * the owner of it:
+- */
+- rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
+-
+- /* Store the key for possible exit cleanups: */
+- pi_state->key = *key;
+-
+- WARN_ON(!list_empty(&pi_state->list));
+- list_add(&pi_state->list, &p->pi_state_list);
+- /*
+- * Assignment without holding pi_state->pi_mutex.wait_lock is safe
+- * because there is no concurrency as the object is not published yet.
+- */
+- pi_state->owner = p;
++ __attach_to_pi_owner(p, key, ps);
+ raw_spin_unlock_irq(&p->pi_lock);
+
+ put_task_struct(p);
+
+- *ps = pi_state;
+-
+ return 0;
+ }
+
+@@ -1464,11 +1468,14 @@ static int futex_lock_pi_atomic(u32 __us
+ * @task is guaranteed to be alive and it cannot be exiting
+ * because it is either sleeping or waiting in
+ * futex_requeue_pi_wakeup_sync().
++ *
++ * No need to do the full attach_to_pi_owner() exercise
++ * because @task is known and valid.
+ */
+ if (set_waiters) {
+- ret = attach_to_pi_owner(uaddr, newval, key, ps,
+- exiting);
+- WARN_ON(ret);
++ raw_spin_lock_irq(&task->pi_lock);
++ __attach_to_pi_owner(task, key, ps);
++ raw_spin_unlock_irq(&task->pi_lock);
+ }
+ return 1;
+ }
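For quick orientation, the shape of the change above is: attach_to_pi_owner() keeps the
TID lookup, the exit-race handling and the sanity checks and then calls the new
__attach_to_pi_owner(), while futex_lock_pi_atomic() calls __attach_to_pi_owner()
directly under task->pi_lock because the requeue target task is already known and
pinned. The snippet below is a minimal user-space C analogue of that split, not kernel
code; every identifier in it (owner, owner_attach, owner_lookup_and_attach,
find_owner_by_id) is hypothetical and chosen only to mirror the pattern of splitting
the bare attach step out of the lookup-and-validate path.

/*
 * User-space analogue of the split done by this patch: the
 * "lookup + validate + attach" path keeps its checks, while callers
 * that already hold a known-valid owner call the bare attach helper.
 * All names here are hypothetical illustrations, not kernel APIs.
 */
#include <stdio.h>
#include <stdlib.h>

struct owner {
	int id;
	int attached;
};

static struct owner owners[] = { { .id = 1 }, { .id = 2 } };

/* Bare attach step: the caller guarantees @o is valid and cannot go away. */
static void owner_attach(struct owner *o)
{
	o->attached = 1;
}

static struct owner *find_owner_by_id(int id)
{
	for (size_t i = 0; i < sizeof(owners) / sizeof(owners[0]); i++) {
		if (owners[i].id == id)
			return &owners[i];
	}
	return NULL;
}

/* Full variant: lookup and validation, then the shared attach step. */
static int owner_lookup_and_attach(int id)
{
	struct owner *o = find_owner_by_id(id);

	if (!o)
		return -1;	/* lookup failed, analogous to -ESRCH */
	owner_attach(o);
	return 0;
}

int main(void)
{
	struct owner *known = &owners[0];

	/* Caller without a reference: must go through lookup and checks. */
	if (owner_lookup_and_attach(2))
		return EXIT_FAILURE;

	/* Caller that already holds a valid owner: skip the redundant lookup. */
	owner_attach(known);

	printf("attached: %d %d\n", owners[0].attached, owners[1].attached);
	return EXIT_SUCCESS;
}

As in the patch, the point of the split is that a caller which already holds a
guaranteed-alive owner can take the cheap path without repeating the lookup and its
error handling.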