author     Luiz Capitulino <lcapitulino@redhat.com>    2016-05-27 15:03:28 +0200
committer  Steven Rostedt <rostedt@goodmis.org>        2017-06-07 19:03:24 -0400
commit     70f6e26b9975b809870f74fa2914b3b2ae31310f (patch)
tree       9dc4f877d39b135670895497b69f75b06e4687a2
parent     07c85a443d3146b026a1d98561af82ae2c5c07ce (diff)
download   linux-rt-70f6e26b9975b809870f74fa2914b3b2ae31310f.tar.gz
mm: perform lru_add_drain_all() remotely
lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run on all
CPUs that have non-empty LRU pagevecs and then waiting for the scheduled work
to complete. However, workqueue threads may never have the chance to run on a
CPU that's running a SCHED_FIFO task. This causes lru_add_drain_all() to
block forever.

This commit solves this problem by changing lru_add_drain_all() to drain the
LRU pagevecs of remote CPUs. This is done by grabbing swapvec_lock and
calling lru_add_drain_cpu().

PS: This is based on an idea and initial implementation by Rik van Riel.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
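To make the locking idea easier to see outside the kernel, below is a minimal userspace sketch of the same pattern: rather than queueing work on each CPU and waiting for it to run (which can never finish if the target CPU is monopolized by a SCHED_FIFO task), the draining thread takes the target's lock and empties the buffer itself. The per-thread buffer, its mutex, and the names worker_buf and drain_all_remote are illustrative stand-ins for the per-CPU pagevec and swapvec_lock, not the actual kernel API.

/*
 * Minimal userspace sketch (NOT kernel code) of the remote-drain pattern.
 * The per-thread buffer and its mutex stand in for the per-CPU LRU pagevec
 * and swapvec_lock; worker_buf and drain_all_remote are hypothetical names.
 */
#include <pthread.h>
#include <stdio.h>

#define NWORKERS 4

struct worker_buf {
	pthread_mutex_t lock;	/* stand-in for the per-CPU swapvec_lock */
	int pending;		/* stand-in for queued LRU pagevec entries */
};

static struct worker_buf bufs[NWORKERS];

/* Drain one worker's buffer; caller must hold its lock. */
static void drain_buf(struct worker_buf *b)
{
	b->pending = 0;
}

/*
 * Remote drain: the calling thread locks each worker's buffer and drains it
 * itself, so it never has to wait for the worker thread to be scheduled.
 */
static void drain_all_remote(void)
{
	for (int i = 0; i < NWORKERS; i++) {
		pthread_mutex_lock(&bufs[i].lock);
		drain_buf(&bufs[i]);
		pthread_mutex_unlock(&bufs[i].lock);
	}
}

int main(void)
{
	for (int i = 0; i < NWORKERS; i++) {
		pthread_mutex_init(&bufs[i].lock, NULL);
		bufs[i].pending = i + 1;	/* pretend entries are queued */
	}

	drain_all_remote();

	for (int i = 0; i < NWORKERS; i++)
		printf("worker %d pending after drain: %d\n", i, bufs[i].pending);
	return 0;
}

In the patch itself the same shape appears in the CONFIG_PREEMPT_RT_BASE variant of remote_lru_add_drain() below, which takes the remote CPU's swapvec_lock via local_lock_on() and calls lru_add_drain_cpu() directly.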
-rw-r--r--  mm/swap.c | 37
1 file changed, 30 insertions(+), 7 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 0cb7d0deba9c..7f23326254dd 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -717,9 +717,15 @@ void lru_add_drain_cpu(int cpu)
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
+#ifdef CONFIG_PREEMPT_RT_BASE
+		local_lock_irqsave_on(rotate_lock, flags, cpu);
+		pagevec_move_tail(pvec);
+		local_unlock_irqrestore_on(rotate_lock, flags, cpu);
+#else
 		local_lock_irqsave(rotate_lock, flags);
 		pagevec_move_tail(pvec);
 		local_unlock_irqrestore(rotate_lock, flags);
+#endif
 	}
 
 	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
@@ -762,12 +768,32 @@ void lru_add_drain(void)
 	local_unlock_cpu(swapvec_lock);
 }
 
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	local_lock_on(swapvec_lock, cpu);
+	lru_add_drain_cpu(cpu);
+	local_unlock_on(swapvec_lock, cpu);
+}
+
+#else
+
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+	INIT_WORK(work, lru_add_drain_per_cpu);
+	schedule_work_on(cpu, work);
+	cpumask_set_cpu(cpu, has_work);
+}
+#endif
 
 void lru_add_drain_all(void)
 {
@@ -780,20 +806,17 @@ void lru_add_drain_all(void)
 	cpumask_clear(&has_work);
 
 	for_each_online_cpu(cpu) {
-		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
-
 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
-		    need_activate_page_drain(cpu)) {
-			INIT_WORK(work, lru_add_drain_per_cpu);
-			schedule_work_on(cpu, work);
-			cpumask_set_cpu(cpu, &has_work);
-		}
+		    need_activate_page_drain(cpu))
+			remote_lru_add_drain(cpu, &has_work);
 	}
 
+#ifndef CONFIG_PREEMPT_RT_BASE
 	for_each_cpu(cpu, &has_work)
 		flush_work(&per_cpu(lru_add_drain_work, cpu));
+#endif
 
 	put_online_cpus();
 	mutex_unlock(&lock);