summary refs log tree commit diff
path: root/patches/0048-locking-ww_mutex-Abstract-out-mutex-types.patch
diff options
context:
space:
mode:
Diffstat (limited to 'patches/0048-locking-ww_mutex-Abstract-out-mutex-types.patch')
-rw-r--r--  patches/0048-locking-ww_mutex-Abstract-out-mutex-types.patch  98
1 files changed, 98 insertions, 0 deletions
diff --git a/patches/0048-locking-ww_mutex-Abstract-out-mutex-types.patch b/patches/0048-locking-ww_mutex-Abstract-out-mutex-types.patch
new file mode 100644
index 000000000000..9aaabc2595e4
--- /dev/null
+++ b/patches/0048-locking-ww_mutex-Abstract-out-mutex-types.patch
@@ -0,0 +1,98 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Sun, 15 Aug 2021 23:28:50 +0200
+Subject: [PATCH 48/72] locking/ww_mutex: Abstract out mutex types
+
+Some ww_mutex helper functions use pointers for the underlying mutex and
+mutex_waiter. The upcoming rtmutex based implementation needs to share
+these functions. Add and use defines for the types and replace the direct
+types in the affected functions.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Link: https://lore.kernel.org/r/20210815211304.678720245@linutronix.de
+---
+ kernel/locking/ww_mutex.h | 23 +++++++++++++----------
+ 1 file changed, 13 insertions(+), 10 deletions(-)
+
+--- a/kernel/locking/ww_mutex.h
++++ b/kernel/locking/ww_mutex.h
+@@ -1,5 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0-only */
+
++#define MUTEX mutex
++#define MUTEX_WAITER mutex_waiter
++
+ static inline struct mutex_waiter *
+ __ww_waiter_first(struct mutex *lock)
+ {
+@@ -143,7 +146,7 @@ static inline bool
+ * __ww_mutex_check_kill() wake any but the earliest context.
+ */
+ static bool
+-__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
++__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
+ struct ww_acquire_ctx *ww_ctx)
+ {
+ if (!ww_ctx->is_wait_die)
+@@ -165,7 +168,7 @@ static bool
+ * the lock holders. Even if multiple waiters may wound the lock holder,
+ * it's sufficient that only one does.
+ */
+-static bool __ww_mutex_wound(struct mutex *lock,
++static bool __ww_mutex_wound(struct MUTEX *lock,
+ struct ww_acquire_ctx *ww_ctx,
+ struct ww_acquire_ctx *hold_ctx)
+ {
+@@ -220,9 +223,9 @@ static bool __ww_mutex_wound(struct mute
+ * The current task must not be on the wait list.
+ */
+ static void
+-__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
++__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
+ {
+- struct mutex_waiter *cur;
++ struct MUTEX_WAITER *cur;
+
+ lockdep_assert_held(&lock->wait_lock);
+
+@@ -278,7 +281,7 @@ ww_mutex_set_context_fastpath(struct ww_
+ }
+
+ static __always_inline int
+-__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
++__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
+ {
+ if (ww_ctx->acquired > 0) {
+ #ifdef CONFIG_DEBUG_MUTEXES
+@@ -306,12 +309,12 @@ static __always_inline int
+ * look at waiters before us in the wait-list.
+ */
+ static inline int
+-__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
++__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
+ struct ww_acquire_ctx *ctx)
+ {
+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+ struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
+- struct mutex_waiter *cur;
++ struct MUTEX_WAITER *cur;
+
+ if (ctx->acquired == 0)
+ return 0;
+@@ -354,11 +357,11 @@ static inline int
+ * Wound-Wait ensure we wound the owning context when it is younger.
+ */
+ static inline int
+-__ww_mutex_add_waiter(struct mutex_waiter *waiter,
+- struct mutex *lock,
++__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
++ struct MUTEX *lock,
+ struct ww_acquire_ctx *ww_ctx)
+ {
+- struct mutex_waiter *cur, *pos = NULL;
++ struct MUTEX_WAITER *cur, *pos = NULL;
+ bool is_wait_die;
+
+ if (!ww_ctx) {