author     | Luiz Capitulino <lcapitulino@redhat.com> | 2016-05-27 15:03:28 +0200
committer  | Steven Rostedt <rostedt@goodmis.org>     | 2016-07-12 17:00:28 -0400
commit     | 222788a4688e305cbb3992f69baf8a88a5853c47 (patch)
tree       | a6cc1681d552f3ec21204d9d92a057e26a30a95f
parent     | d90a745aa361ecba9051870de4d6fae88069677f (diff)
download   | linux-rt-222788a4688e305cbb3992f69baf8a88a5853c47.tar.gz
mm: perform lru_add_drain_all() remotely
lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
on all CPUs that have non-empty LRU pagevecs and then waiting for
the scheduled work to complete. However, workqueue threads may never
have the chance to run on a CPU that's running a SCHED_FIFO task.
This causes lru_add_drain_all() to block forever.
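For reference, here is a condensed sketch of that pre-patch flow, pieced
together from the context and removed lines of the mm/swap.c diff below
(the pagevec_count() checks, CPU-hotplug protection and locking are
omitted):

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();	/* drains the pagevecs of the CPU it runs on */
}

void lru_add_drain_all(void)
{
	static struct cpumask has_work;
	int cpu;

	cpumask_clear(&has_work);
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		INIT_WORK(work, lru_add_drain_per_cpu);
		schedule_work_on(cpu, work);	/* worker needs CPU time on @cpu */
		cpumask_set_cpu(cpu, &has_work);
	}

	/*
	 * Waits for every queued worker to finish -- forever, if a
	 * SCHED_FIFO task never yields one of those CPUs to the workqueue.
	 */
	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));
}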
This commit solves this problem by changing lru_add_drain_all()
to drain the LRU pagevecs of remote CPUs directly on PREEMPT_RT.
This is done by grabbing the remote CPU's swapvec_lock and
calling lru_add_drain_cpu() for that CPU.
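A minimal sketch of the RT-side helper, mirroring the second mm/swap.c
hunk below; it relies on the RT tree's local_lock_on()/local_unlock_on()
helpers, which take a given CPU's instance of a local lock from another
CPU:

#ifdef CONFIG_PREEMPT_RT_BASE
/*
 * RT path: instead of queueing work on @cpu and waiting for it,
 * take that CPU's swapvec_lock from here and drain its pagevecs
 * directly.  No workqueue involvement, so a SCHED_FIFO hog on
 * @cpu cannot stall the caller.  (@has_work is only used by the
 * workqueue variant and is ignored here.)
 */
static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
{
	local_lock_on(swapvec_lock, cpu);
	lru_add_drain_cpu(cpu);
	local_unlock_on(swapvec_lock, cpu);
}
#endif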
PS: This is based on an idea and initial implementation by
Rik van Riel.
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r-- | mm/swap.c | 37
1 file changed, 30 insertions, 7 deletions
diff --git a/mm/swap.c b/mm/swap.c
index acb833351464..745dd2a52288 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -811,9 +811,15 @@ void lru_add_drain_cpu(int cpu)
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
+#ifdef CONFIG_PREEMPT_RT_BASE
+		local_lock_irqsave_on(rotate_lock, flags, cpu);
+		pagevec_move_tail(pvec);
+		local_unlock_irqrestore_on(rotate_lock, flags, cpu);
+#else
 		local_lock_irqsave(rotate_lock, flags);
 		pagevec_move_tail(pvec);
 		local_unlock_irqrestore(rotate_lock, flags);
+#endif
 	}
 
 	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
@@ -856,12 +862,32 @@ void lru_add_drain(void)
 	local_unlock_cpu(swapvec_lock);
 }
 
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	local_lock_on(swapvec_lock, cpu);
+	lru_add_drain_cpu(cpu);
+	local_unlock_on(swapvec_lock, cpu);
+}
+
+#else
+
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+	struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+	INIT_WORK(work, lru_add_drain_per_cpu);
+	schedule_work_on(cpu, work);
+	cpumask_set_cpu(cpu, has_work);
+}
+#endif
 void lru_add_drain_all(void)
 {
@@ -874,20 +900,17 @@ void lru_add_drain_all(void)
 	cpumask_clear(&has_work);
 
 	for_each_online_cpu(cpu) {
-		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
-
 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
-		    need_activate_page_drain(cpu)) {
-			INIT_WORK(work, lru_add_drain_per_cpu);
-			schedule_work_on(cpu, work);
-			cpumask_set_cpu(cpu, &has_work);
-		}
+		    need_activate_page_drain(cpu))
+			remote_lru_add_drain(cpu, &has_work);
 	}
 
+#ifndef CONFIG_PREEMPT_RT_BASE
 	for_each_cpu(cpu, &has_work)
 		flush_work(&per_cpu(lru_add_drain_work, cpu));
+#endif
 
 	put_online_cpus();
 	mutex_unlock(&lock);