author    Thomas Gleixner <tglx@linutronix.de>      2011-10-05 11:59:38 -0700
committer Steven Rostedt <rostedt@goodmis.org>      2015-01-16 18:12:39 -0500
commit    59fb283513838f8aaadfa7ede02090d53543f604 (patch)
tree      eebe528f2a3af03a49677cbcc84a7e9b21b7ff2a /kernel/rcutree.c
parent    86aa6cf2a1d62dd4986d271a126a74ecae8789ea (diff)
download  linux-rt-59fb283513838f8aaadfa7ede02090d53543f604.tar.gz
rcu: Merge RCU-bh into RCU-preempt
The Linux kernel has long RCU-bh read-side critical sections, which
intolerably increase scheduling latency under mainline's RCU-bh rules
because those rules make RCU-bh read-side critical sections
non-preemptible.
This patch therefore arranges for RCU-bh to be implemented in terms of
RCU-preempt for CONFIG_PREEMPT_RT_FULL=y.
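As a rough sketch of the mechanism (names and placement here are
assumptions; the actual aliasing lives in the header changes of the RT
series, not in this kernel/rcutree.c diff), the RCU-bh API can be
expressed in terms of the preemptible flavor along these lines:

/*
 * Hypothetical header-side counterpart to the #ifndef
 * CONFIG_PREEMPT_RT_FULL guards in the diff below: the dedicated
 * RCU-bh machinery is compiled out, and the RCU-bh API is redirected
 * to RCU-preempt.  This is an illustrative sketch, not the literal
 * RT-series change.
 */
#ifdef CONFIG_PREEMPT_RT_FULL
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();	/* keep BH exclusion for existing users... */
	rcu_read_lock();	/* ...while the critical section becomes an
				 * RCU-preempt read-side critical section */
}

static inline void rcu_read_unlock_bh(void)
{
	rcu_read_unlock();
	local_bh_enable();
}

#define call_rcu_bh		call_rcu
#define rcu_barrier_bh		rcu_barrier
#define synchronize_rcu_bh	synchronize_rcu
#endif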
This has the downside of defeating the purpose of RCU-bh, namely,
handling the case where the system is subjected to a network-based
denial-of-service attack that keeps at least one CPU doing full-time
softirq processing. This issue will be fixed by a later commit.
The current commit will need some work to make it appropriate for
mainline use, for example, it needs to be extended to cover Tiny RCU.
[ paulmck: Added a useful changelog ]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/rcutree.c')
 kernel/rcutree.c | 10 ++++++++++
 1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 35380019f0fc..c43bd26842a4 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -182,6 +182,7 @@ void rcu_sched_qs(int cpu)
 	rdp->passed_quiesce = 1;
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void rcu_bh_qs(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
@@ -190,6 +191,7 @@ void rcu_bh_qs(int cpu)
 	trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
 	rdp->passed_quiesce = 1;
 }
+#endif
 
 /*
  * Note a context switch. This is a quiescent state for RCU-sched,
@@ -239,6 +241,7 @@ long rcu_batches_completed_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Return the number of RCU BH batches processed thus far for debug & stats.
  */
@@ -256,6 +259,7 @@ void rcu_bh_force_quiescent_state(void)
 	force_quiescent_state(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
+#endif
 
 /*
  * Record the number of times rcutorture tests have been initiated and
@@ -2448,6 +2452,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Queue an RCU callback for invocation after a quicker grace period.
  */
@@ -2456,6 +2461,7 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 	__call_rcu(head, func, &rcu_bh_state, -1, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
 
 /*
  * Because a context switch is a grace period for RCU-sched and RCU-bh,
@@ -2533,6 +2539,7 @@ void synchronize_sched(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
  *
@@ -2559,6 +2566,7 @@ void synchronize_rcu_bh(void)
 	wait_rcu_gp(call_rcu_bh);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+#endif
 
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
@@ -2955,6 +2963,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	mutex_unlock(&rsp->barrier_mutex);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
  */
@@ -2963,6 +2972,7 @@ void rcu_barrier_bh(void)
 	_rcu_barrier(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+#endif
 
 /**
  * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
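To see the effect from a caller's perspective, here is a hypothetical
RCU-bh user (illustrative only, not from the patch).  Its source is
unchanged by this commit, but with CONFIG_PREEMPT_RT_FULL=y the guards
above compile the dedicated RCU-bh flavor out, so the same calls run on
RCU-preempt and the read side becomes preemptible:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo {
	int val;
	struct rcu_head rcu;
};

static struct demo __rcu *demo_ptr;

/* Reader: non-preemptible RCU-bh on mainline, preemptible on RT. */
static int demo_read(void)
{
	struct demo *p;
	int val = -1;

	rcu_read_lock_bh();
	p = rcu_dereference_bh(demo_ptr);
	if (p)
		val = p->val;
	rcu_read_unlock_bh();
	return val;
}

static void demo_free(struct rcu_head *head)
{
	kfree(container_of(head, struct demo, rcu));
}

/* Updater: call_rcu_bh() resolves to the RCU-preempt flavor on RT. */
static void demo_update(struct demo *newp)
{
	struct demo *old = rcu_dereference_protected(demo_ptr, 1);

	rcu_assign_pointer(demo_ptr, newp);
	if (old)
		call_rcu_bh(&old->rcu, demo_free);
}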