From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Fri, 2 May 2014 13:13:22 +0200
Subject: stomp-machine: create lg_global_trylock_relax() primitive

Create lg_global_trylock_relax() for use by the stopper thread when it
cannot schedule, to deal with stop_cpus_lock, which is now an lglock.

Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/lglock.h      |    6 ++++++
 include/linux/spinlock_rt.h |    1 +
 kernel/locking/lglock.c     |   25 +++++++++++++++++++++++++
 kernel/locking/rtmutex.c    |    5 +++++
 4 files changed, 37 insertions(+)
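
[ Illustration only, not applied by this patch: a hedged sketch of how the
  caller named in the lglock.c comment, queue_stop_cpus_work() in
  kernel/stop_machine.c, is expected to take stop_cpus_lock through the new
  primitive.  The function body and parameters below are assumptions for
  illustration; the actual conversion of the caller is a separate patch. ]

	static void queue_stop_cpus_work(const struct cpumask *cpumask, ...)
	{
		/*
		 * May run on a CPU that is not (yet) active, so sleeping
		 * is not allowed here: instead of lg_global_lock(), which
		 * can block on PREEMPT_RT_FULL, spin on each per-CPU lock
		 * with __rt_spin_trylock()/cpu_relax().
		 */
		lg_global_trylock_relax(&stop_cpus_lock);

		/* ... queue one cpu_stop_work item per CPU in cpumask ... */

		lg_global_unlock(&stop_cpus_lock);
	}
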

--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -82,6 +82,12 @@ void lg_double_unlock(struct lglock *lg,
 void lg_global_lock(struct lglock *lg);
 void lg_global_unlock(struct lglock *lg);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+#define lg_global_trylock_relax(name)	lg_global_lock(name)
+#else
+void lg_global_trylock_relax(struct lglock *lg);
+#endif
+
 #else
 /* When !CONFIG_SMP, map lglock to spinlock */
 #define lglock spinlock
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -34,6 +34,7 @@ extern int atomic_dec_and_spin_lock(atom
  */
 extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
 extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
 
 #define spin_lock(lock)				\
 	do {					\
--- a/kernel/locking/lglock.c
+++ b/kernel/locking/lglock.c
@@ -127,3 +127,28 @@ void lg_global_unlock(struct lglock *lg)
 	preempt_enable_nort();
 }
 EXPORT_SYMBOL(lg_global_unlock);
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * HACK: If you use this, you get to keep the pieces.
+ * Used in queue_stop_cpus_work() when stop machinery
+ * is called from inactive CPU, so we can't schedule.
+ */
+# define lg_do_trylock_relax(l)			\
+	do {					\
+		while (!__rt_spin_trylock(l))	\
+			cpu_relax();		\
+	} while (0)
+
+void lg_global_trylock_relax(struct lglock *lg)
+{
+	int i;
+
+	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+	for_each_possible_cpu(i) {
+		lg_lock_ptr *lock;
+		lock = per_cpu_ptr(lg->lock, i);
+		lg_do_trylock_relax(lock);
+	}
+}
+#endif
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1153,6 +1153,11 @@ void __lockfunc rt_spin_unlock_wait(spin
 }
 EXPORT_SYMBOL(rt_spin_unlock_wait);
 
+int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
+{
+	return rt_mutex_trylock(lock);
+}
+
 int __lockfunc rt_spin_trylock(spinlock_t *lock)
 {
 	int ret = rt_mutex_trylock(&lock->lock);