Diffstat (limited to 'kernel/rcu/tree.c')
 -rw-r--r--  kernel/rcu/tree.c | 148
 1 file changed, 137 insertions(+), 11 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 10f62c6f48e7..dbee19478f09 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -55,6 +55,11 @@
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/oom.h>
+#include <linux/smpboot.h>
+#include "../time/tick-internal.h"
#include "tree.h"
#include "rcu.h"
@@ -260,6 +265,19 @@ void rcu_sched_qs(void)
this_cpu_ptr(&rcu_sched_data), true);
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void rcu_preempt_qs(void);
+
+void rcu_bh_qs(void)
+{
+ unsigned long flags;
+
+ /* rcu_preempt_qs() must be called with irqs disabled, so disable them here. */
+ local_irq_save(flags);
+ rcu_preempt_qs();
+ local_irq_restore(flags);
+}
+#else
void rcu_bh_qs(void)
{
if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
@@ -269,6 +287,7 @@ void rcu_bh_qs(void)
__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
}
}
+#endif
static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
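On PREEMPT_RT_FULL the rcu_bh flavor is folded into preemptible RCU, so reporting a BH quiescent state becomes a preempt-RCU quiescent-state report. rcu_preempt_qs() expects interrupts to be disabled, but existing rcu_bh_qs() callers may run with them enabled, which is why the RT variant above wraps the call in local_irq_save()/local_irq_restore(). A simplified sketch of one such caller, mainline __do_softirq() (not part of this diff):

	h->action(h);	/* run a pending softirq handler                  */
	...
	rcu_bh_qs();	/* report a BH quiescent state between rounds;    */
			/* interrupts are still enabled at this point     */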
@@ -449,11 +468,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
/*
* Return the number of RCU BH batches started thus far for debug & stats.
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
unsigned long rcu_batches_started_bh(void)
{
return rcu_bh_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
+#endif
/*
* Return the number of RCU batches completed thus far for debug & stats.
@@ -473,6 +494,7 @@ unsigned long rcu_batches_completed_sched(void)
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Return the number of RCU BH batches completed thus far for debug & stats.
*/
@@ -481,6 +503,7 @@ unsigned long rcu_batches_completed_bh(void)
return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+#endif
/*
* Return the number of RCU expedited batches completed thus far for
@@ -504,6 +527,7 @@ unsigned long rcu_exp_batches_completed_sched(void)
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Force a quiescent state.
*/
@@ -522,6 +546,13 @@ void rcu_bh_force_quiescent_state(void)
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
+#else
+void rcu_force_quiescent_state(void)
+{
+}
+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
+#endif
+
/*
* Force a quiescent state for RCU-sched.
*/
@@ -572,9 +603,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
case RCU_FLAVOR:
rsp = rcu_state_p;
break;
+#ifndef CONFIG_PREEMPT_RT_FULL
case RCU_BH_FLAVOR:
rsp = &rcu_bh_state;
break;
+#endif
case RCU_SCHED_FLAVOR:
rsp = &rcu_sched_state;
break;
@@ -3016,18 +3049,17 @@ __rcu_process_callbacks(struct rcu_state *rsp)
/*
* Do RCU core processing for the current CPU.
*/
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(void)
{
struct rcu_state *rsp;
if (cpu_is_offline(smp_processor_id()))
return;
- trace_rcu_utilization(TPS("Start RCU core"));
for_each_rcu_flavor(rsp)
__rcu_process_callbacks(rsp);
- trace_rcu_utilization(TPS("End RCU core"));
}
+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
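Because RCU core processing now runs from a per-CPU kthread rather than the RCU_SOFTIRQ handler, rcu_process_callbacks() loses its softirq_action argument and the "Start/End RCU core" utilization tracepoints move into rcu_cpu_kthread() below. The per-CPU rcu_cpu_kthread_task pointer added here is how invoke_rcu_core() finds the thread to wake. Roughly, the dispatch path changes as follows (a sketch assembled from the hunks in this diff, not literal patch lines):

	/* Before: kick the softirq; its handler runs rcu_process_callbacks(). */
	raise_softirq(RCU_SOFTIRQ);

	/* After: mark work pending and wake this CPU's rcuc kthread, which
	 * invokes rcu_process_callbacks() directly (see invoke_rcu_core()
	 * and rcu_cpu_kthread() below). */
	__this_cpu_write(rcu_cpu_has_work, 1);
	rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
		      __this_cpu_read(rcu_cpu_kthread_status));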
/*
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
@@ -3039,20 +3071,107 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
- if (likely(!rsp->boost)) {
- rcu_do_batch(rsp, rdp);
- return;
- }
- invoke_rcu_callbacks_kthread();
+ rcu_do_batch(rsp, rdp);
+}
+
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+ /*
+ * If the thread is yielding, only wake it when this
+ * is invoked from idle
+ */
+ if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
+ wake_up_process(t);
}
+/*
+ * Wake up this CPU's rcuc kthread to do RCU core processing.
+ */
static void invoke_rcu_core(void)
{
- if (cpu_online(smp_processor_id()))
- raise_softirq(RCU_SOFTIRQ);
+ unsigned long flags;
+ struct task_struct *t;
+
+ if (!cpu_online(smp_processor_id()))
+ return;
+ local_irq_save(flags);
+ __this_cpu_write(rcu_cpu_has_work, 1);
+ t = __this_cpu_read(rcu_cpu_kthread_task);
+ if (t != NULL && current != t)
+ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
+ local_irq_restore(flags);
+}
+
+static void rcu_cpu_kthread_park(unsigned int cpu)
+{
+ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+}
+
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
+ return __this_cpu_read(rcu_cpu_has_work);
}
/*
+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
+ */
+static void rcu_cpu_kthread(unsigned int cpu)
+{
+ unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
+ char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
+ int spincnt;
+
+ for (spincnt = 0; spincnt < 10; spincnt++) {
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
+ local_bh_disable();
+ *statusp = RCU_KTHREAD_RUNNING;
+ this_cpu_inc(rcu_cpu_kthread_loops);
+ local_irq_disable();
+ work = *workp;
+ *workp = 0;
+ local_irq_enable();
+ if (work)
+ rcu_process_callbacks();
+ local_bh_enable();
+ if (*workp == 0) {
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
+ *statusp = RCU_KTHREAD_WAITING;
+ return;
+ }
+ }
+ *statusp = RCU_KTHREAD_YIELDING;
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
+ schedule_timeout_interruptible(2);
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
+ *statusp = RCU_KTHREAD_WAITING;
+}
+
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+ .store = &rcu_cpu_kthread_task,
+ .thread_should_run = rcu_cpu_kthread_should_run,
+ .thread_fn = rcu_cpu_kthread,
+ .thread_comm = "rcuc/%u",
+ .setup = rcu_cpu_kthread_setup,
+ .park = rcu_cpu_kthread_park,
+};
+
+/*
+ * Spawn per-CPU RCU core processing kthreads.
+ */
+static int __init rcu_spawn_core_kthreads(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(rcu_cpu_has_work, cpu) = 0;
+ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
+ return 0;
+}
+early_initcall(rcu_spawn_core_kthreads);
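The rcuc threads themselves are created and hotplug-managed by the generic smpboot infrastructure via smpboot_register_percpu_thread() above, not by hand. The contract smpboot provides is roughly the following (a simplified sketch of the per-CPU loop in kernel/smpboot.c, not part of this diff): one thread per CPU, bound to that CPU, parked and unparked across CPU hotplug, sleeping until thread_should_run() says there is work.

	/* Simplified sketch of the smpboot-managed per-CPU thread loop: */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_park())
			kthread_parkme();		/* ->park(), i.e. rcu_cpu_kthread_park(), ran first */
		if (!ht->thread_should_run(cpu)) {	/* rcu_cpu_kthread_should_run() */
			schedule();			/* sleep until woken, e.g. by invoke_rcu_core() */
		} else {
			__set_current_state(TASK_RUNNING);
			ht->thread_fn(cpu);		/* rcu_cpu_kthread() */
		}
	}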
+
+/*
* Handle any core-RCU processing required by a call_rcu() invocation.
*/
static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
@@ -3195,6 +3314,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
@@ -3203,6 +3323,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
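With CONFIG_PREEMPT_RT_FULL there is no separate rcu_bh state, so the rcu_bh entry points (call_rcu_bh() here, and synchronize_rcu_bh() and rcu_barrier_bh() further down) are compiled out of tree.c. Callers still build because the RT series presumably maps the _bh API onto the preemptible flavor on the header side; roughly along these lines (an assumed companion change to include/linux/rcupdate.h, not shown in this diff):

	/* Assumed header-side mapping under PREEMPT_RT_FULL (not part of
	 * this diff): the _bh API falls back to the preemptible flavor. */
	#ifdef CONFIG_PREEMPT_RT_FULL
	# define call_rcu_bh		call_rcu
	# define synchronize_rcu_bh	synchronize_rcu
	# define rcu_barrier_bh		rcu_barrier
	#endif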
/*
* Queue an RCU callback for lazy invocation after a grace period.
@@ -3294,6 +3415,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
+#ifndef CONFIG_PREEMPT_RT_FULL
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
@@ -3320,6 +3442,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+#endif
/**
* get_state_synchronize_rcu - Snapshot current RCU state
@@ -3698,6 +3821,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
mutex_unlock(&rsp->barrier_mutex);
}
+#ifndef CONFIG_PREEMPT_RT_FULL
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
@@ -3706,6 +3830,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+#endif
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
@@ -4227,12 +4352,13 @@ void __init rcu_init(void)
rcu_bootup_announce();
rcu_init_geometry();
+#ifndef CONFIG_PREEMPT_RT_FULL
rcu_init_one(&rcu_bh_state);
+#endif
rcu_init_one(&rcu_sched_state);
if (dump_tree)
rcu_dump_rcu_node_tree(&rcu_sched_state);
__rcu_init_preempt();
- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
/*
* We don't need protection against CPU-hotplug here because