author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2013-08-15 13:23:23 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2013-09-23 09:18:16 -0700
commit     69a79bb12a81024d718e73c52e886907a3777b34 (patch)
tree       0beef5d898f1373dbc5b4834d24d676e73ea1da2
parent     756cbf6befe6f59b0b3e0967d92a66c11e2566ed (diff)
download   linux-69a79bb12a81024d718e73c52e886907a3777b34.tar.gz
rcu: Track rcu_nocb_kthread()'s sleeping and awakening
This commit adds event traces to track all of rcu_nocb_kthread()'s
blocking and awakening.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--  include/trace/events/rcu.h |  4
-rw-r--r--  kernel/rcutree_plugin.h    | 15
2 files changed, 18 insertions(+), 1 deletion(-)
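Before the hunks themselves, here is a condensed paraphrase of the patched
kthread loop, showing where each new event fires. This is a reading aid, not
the literal patch; callback dequeueing and invocation are elided (see the
rcutree_plugin.h hunks below for the real code):

	/* Condensed sketch of rcu_nocb_kthread() after this patch. */
	static int rcu_nocb_kthread(void *arg)
	{
		bool firsttime = 1;
		struct rcu_data *rdp = arg;

		for (;;) {
			if (!rcu_nocb_poll) {
				/* About to block: trace "Sleep" first. */
				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
						    TPS("Sleep"));
				wait_event_interruptible(rdp->nocb_wq,
							 rdp->nocb_head);
			} else if (firsttime) {
				/* Polling: trace "Poll" once per cycle. */
				firsttime = 0;
				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
						    TPS("Poll"));
			}
			/* ... dequeue and invoke callbacks; firsttime is
			 * reset to 1 once a non-empty list is found, so
			 * "Poll" is traced again after each batch. ... */
		}
		return 0;
	}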
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 4301cd9e3ee5..a087d82ed431 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -183,8 +183,12 @@ TRACE_EVENT(rcu_grace_period_init,
* "WakeOvf": Wake rcuo kthread, CB list is huge.
* "WakeNot": Don't wake rcuo kthread.
* "WakeNotPoll": Don't wake rcuo kthread because it is polling.
+ * "Poll": Start of new polling cycle for rcu_nocb_poll.
+ * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll.
* "WokeEmpty": rcuo kthread woke to find empty list.
* "WokeNonEmpty": rcuo kthread woke to find non-empty list.
+ * "WaitQueue": Enqueue partially done, timed wait for it to complete.
+ * "WokeQueue": Partial enqueue now complete.
*/
TRACE_EVENT(rcu_nocb_wake,
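The call sites in the next file pass (rsp->name, cpu, reason), so for
reference the event these strings feed looks roughly like the following.
This is a sketch of the pre-existing definition, which this patch does not
change; the exact field macros in the tree may differ:

	TRACE_EVENT(rcu_nocb_wake,

		TP_PROTO(const char *rcuname, int cpu, const char *reason),

		TP_ARGS(rcuname, cpu, reason),

		TP_STRUCT__entry(
			__field(const char *, rcuname)
			__field(int, cpu)
			__field(const char *, reason)
		),

		TP_fast_assign(
			__entry->rcuname = rcuname;
			__entry->cpu = cpu;
			__entry->reason = reason;
		),

		TP_printk("%s %d %s",
			  __entry->rcuname, __entry->cpu, __entry->reason)
	);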
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 24b01b69be92..21205b185340 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2230,6 +2230,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 static int rcu_nocb_kthread(void *arg)
 {
 	int c, cl;
+	bool firsttime = 1;
 	struct rcu_head *list;
 	struct rcu_head *next;
 	struct rcu_head **tail;
@@ -2238,8 +2239,15 @@ static int rcu_nocb_kthread(void *arg)
 	/* Each pass through this loop invokes one batch of callbacks */
 	for (;;) {
 		/* If not polling, wait for next batch of callbacks. */
-		if (!rcu_nocb_poll)
+		if (!rcu_nocb_poll) {
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("Sleep"));
 			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+		} else if (firsttime) {
+			firsttime = 0;
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("Poll"));
+		}
 		list = ACCESS_ONCE(rdp->nocb_head);
 		if (!list) {
 			if (!rcu_nocb_poll)
@@ -2249,6 +2257,7 @@ static int rcu_nocb_kthread(void *arg)
 			flush_signals(current);
 			continue;
 		}
+		firsttime = 1;
 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 				    TPS("WokeNonEmpty"));
@@ -2271,7 +2280,11 @@ static int rcu_nocb_kthread(void *arg)
 		next = list->next;
 		/* Wait for enqueuing to complete, if needed. */
 		while (next == NULL && &list->next != tail) {
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("WaitQueue"));
 			schedule_timeout_interruptible(1);
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("WokeQueue"));
 			next = list->next;
 		}
 		debug_rcu_head_unqueue(list);
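The WaitQueue/WokeQueue pair instruments an existing subtlety rather than
adding a new one: the no-CBs enqueue path publishes a callback in two steps,
first swapping ->nocb_tail and only then filling in the old tail's ->next
pointer, so the kthread can briefly see next == NULL on a list whose tail
pointer says more elements follow. A minimal sketch of that two-step publish
(an illustrative paraphrase of the enqueue helper, not the literal code):

	static void nocb_enqueue_sketch(struct rcu_data *rdp,
					struct rcu_head *rhp)
	{
		struct rcu_head **old_rhpp;

		rhp->next = NULL;
		/* Step 1: atomically claim the tail slot. */
		old_rhpp = xchg(&rdp->nocb_tail, &rhp->next);
		/*
		 * Window: a consumer holding the old tail snapshot now
		 * sees *old_rhpp == NULL even though the list has grown.
		 * The wait loop above sleeps a jiffy at a time, tracing
		 * "WaitQueue" before and "WokeQueue" after each sleep,
		 * until step 2 lands.
		 */
		/* Step 2: publish the new element. */
		ACCESS_ONCE(*old_rhpp) = rhp;
	}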