From: Thomas Gleixner
Date: Sun, 15 Aug 2021 23:28:36 +0200
Subject: [PATCH 39/72] locking/mutex: Make mutex::wait_lock raw

The wait_lock of mutex is really a low-level lock. Convert it to a
raw_spinlock like the wait_lock of rtmutex.

[ mingo: backmerged the test_lockup.c build fix by bigeasy. ]

Co-developed-by: Sebastian Andrzej Siewior
Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Ingo Molnar
Link: https://lore.kernel.org/r/20210815211304.166863404@linutronix.de
---
 include/linux/mutex.h  |  4 ++--
 kernel/locking/mutex.c | 22 +++++++++++-----------
 lib/test_lockup.c      |  2 +-
 3 files changed, 14 insertions(+), 14 deletions(-)

--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -50,7 +50,7 @@
  */
 struct mutex {
 	atomic_long_t		owner;
-	spinlock_t		wait_lock;
+	raw_spinlock_t		wait_lock;
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	struct optimistic_spin_queue osq; /* Spinner MCS lock */
 #endif
@@ -105,7 +105,7 @@ do {							\

 #define __MUTEX_INITIALIZER(lockname) \
 		{ .owner = ATOMIC_LONG_INIT(0) \
-		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+		, .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
 		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
 		__DEBUG_MUTEX_INITIALIZER(lockname) \
 		__DEP_MAP_MUTEX_INITIALIZER(lockname) }
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -36,7 +36,7 @@ void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
 	atomic_long_set(&lock->owner, 0);
-	spin_lock_init(&lock->wait_lock);
+	raw_spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	osq_lock_init(&lock->osq);
@@ -487,9 +487,9 @@ ww_mutex_set_context_fastpath(struct ww_
 	 * Uh oh, we raced in fastpath, check if any of the waiters need to
 	 * die or wound us.
 	 */
-	spin_lock(&lock->base.wait_lock);
+	raw_spin_lock(&lock->base.wait_lock);
 	__ww_mutex_check_waiters(&lock->base, ctx);
-	spin_unlock(&lock->base.wait_lock);
+	raw_spin_unlock(&lock->base.wait_lock);
 }

 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -964,7 +964,7 @@ static __always_inline int __sched
 		return 0;
 	}

-	spin_lock(&lock->wait_lock);
+	raw_spin_lock(&lock->wait_lock);
 	/*
 	 * After waiting to acquire the wait_lock, try again.
 	 */
@@ -1028,7 +1028,7 @@ static __always_inline int __sched
 			goto err;
 		}

-		spin_unlock(&lock->wait_lock);
+		raw_spin_unlock(&lock->wait_lock);
 		schedule_preempt_disabled();

 		/*
@@ -1051,9 +1051,9 @@ static __always_inline int __sched
 		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
 			break;

-		spin_lock(&lock->wait_lock);
+		raw_spin_lock(&lock->wait_lock);
 	}
-	spin_lock(&lock->wait_lock);
+	raw_spin_lock(&lock->wait_lock);
 acquired:
 	__set_current_state(TASK_RUNNING);
@@ -1078,7 +1078,7 @@ static __always_inline int __sched
 	if (ww_ctx)
 		ww_mutex_lock_acquired(ww, ww_ctx);

-	spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&lock->wait_lock);
 	preempt_enable();
 	return 0;

@@ -1086,7 +1086,7 @@ static __always_inline int __sched
 	__set_current_state(TASK_RUNNING);
 	__mutex_remove_waiter(lock, &waiter);
 err_early_kill:
-	spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&lock->wait_lock);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, ip);
 	preempt_enable();
@@ -1255,7 +1255,7 @@ static noinline void __sched __mutex_unl
 		owner = old;
 	}

-	spin_lock(&lock->wait_lock);
+	raw_spin_lock(&lock->wait_lock);
 	debug_mutex_unlock(lock);
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
@@ -1272,7 +1272,7 @@ static noinline void __sched __mutex_unl
 	if (owner & MUTEX_FLAG_HANDOFF)
 		__mutex_handoff(lock, next);

-	spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&lock->wait_lock);

 	wake_up_q(&wake_q);
 }
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -502,7 +502,7 @@ static int __init test_lockup_init(void)
 		       offsetof(rwlock_t, magic),
 		       RWLOCK_MAGIC) ||
 	    test_magic(lock_mutex_ptr,
-		       offsetof(struct mutex, wait_lock.rlock.magic),
+		       offsetof(struct mutex, wait_lock.magic),
 		       SPINLOCK_MAGIC) ||
 	    test_magic(lock_rwsem_ptr,
 		       offsetof(struct rw_semaphore, wait_lock.magic),
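
For context: the raw_spinlock_t API mirrors the spinlock_t API call for
call, which is why the conversion above is purely mechanical. The two
types only diverge on PREEMPT_RT, where spinlock_t becomes a sleeping
rtmutex-based lock while raw_spinlock_t remains a true spinning lock,
which is what a lock-internal wait_lock must be. A minimal sketch of
the pattern being applied; the demo_* names are invented for
illustration, only the raw_spin_*() calls are the real kernel API the
patch switches to:

/*
 * Illustrative sketch, not part of the patch: a toy structure whose
 * wait_list is protected by a raw_spinlock_t, mirroring the converted
 * mutex code above.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_lock {
	raw_spinlock_t	 wait_lock;	/* low-level lock; never sleeps */
	struct list_head wait_list;	/* protected by wait_lock */
};

static void demo_init(struct demo_lock *d)
{
	raw_spin_lock_init(&d->wait_lock);	/* was: spin_lock_init() */
	INIT_LIST_HEAD(&d->wait_list);
}

static void demo_enqueue(struct demo_lock *d, struct list_head *entry)
{
	raw_spin_lock(&d->wait_lock);		/* was: spin_lock() */
	list_add_tail(entry, &d->wait_list);
	raw_spin_unlock(&d->wait_lock);		/* was: spin_unlock() */
}

This also explains the lib/test_lockup.c hunk: spinlock_t wraps a
raw_spinlock_t in a member named rlock, so while wait_lock was a
spinlock_t its debug magic lived at wait_lock.rlock.magic. Once
wait_lock is itself a raw_spinlock_t, the magic sits directly at
wait_lock.magic. On a !PREEMPT_RT build spinlock_t is essentially a
thin wrapper around raw_spinlock_t, so the conversion changes nothing
there.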