author		Arjan van de Ven <arjan@linux.intel.com>	2010-05-09 08:24:03 -0700
committer	Ingo Molnar <mingo@elte.hu>	2010-05-09 19:35:26 +0200
commit		8d63bf949e330588b80d30ca8f0a27a45297a9e9 (patch)
tree		e416fe04ddacdf3fc3e2c9ec6938da0ebbab7ff1 /kernel/time
parent		8c7b09f43f4bf570654bcc458ce96819a932303c (diff)
download	linux-rt-8d63bf949e330588b80d30ca8f0a27a45297a9e9.tar.gz
sched: Fold updating of the last_update_time_info into update_ts_time_stats()
This patch folds the updating of the last_update_time into the
update_ts_time_stats() function, and updates the callers. This allows
for further cleanups that are done in the next patch.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: davej@redhat.com
LKML-Reference: <20100509082403.60072967@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
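For context, a minimal caller sketch (not part of this patch; the helper
sample_idle_us() and the variable names are illustrative only). The external
semantics of get_cpu_idle_time_us() are unchanged by this change: it still
returns the cumulative idle time of a CPU in microseconds and stores the
timestamp of the last stats update through last_update_time; only the
bookkeeping moves into update_ts_time_stats().

	#include <linux/tick.h>		/* get_cpu_idle_time_us() */

	/* Hypothetical consumer, e.g. in the style of a cpufreq governor. */
	static u64 sample_idle_us(int cpu, u64 *wall_us)
	{
		u64 idle_us;

		/* *wall_us receives the time of the last idle-stats update (us) */
		idle_us = get_cpu_idle_time_us(cpu, wall_us);

		/*
		 * -1 means NO_HZ is disabled; real callers fall back to
		 * jiffies-based accounting in that case.
		 */
		if (idle_us == -1ULL)
			return 0;

		return idle_us;
	}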
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/tick-sched.c	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f15d18d82c18..e86e1c6674d1 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -153,7 +153,8 @@ static void tick_nohz_update_jiffies(ktime_t now)
 /*
  * Updates the per cpu time idle statistics counters
  */
-static void update_ts_time_stats(struct tick_sched *ts, ktime_t now)
+static void
+update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
 {
 	ktime_t delta;
 
@@ -163,13 +164,19 @@ static void update_ts_time_stats(struct tick_sched *ts, ktime_t now)
 		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
 		ts->idle_entrytime = now;
 	}
+
+	if (ts->idle_active && last_update_time)
+		*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	else
+		*last_update_time = ktime_to_us(now);
+
 }
 
 static void tick_nohz_stop_idle(int cpu, ktime_t now)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-	update_ts_time_stats(ts, now);
+	update_ts_time_stats(ts, now, NULL);
 	ts->idle_active = 0;
 
 	sched_clock_idle_wakeup_event(0);
@@ -181,7 +188,7 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 
 	now = ktime_get();
 
-	update_ts_time_stats(ts, now);
+	update_ts_time_stats(ts, now, NULL);
 
 	ts->idle_entrytime = now;
 	ts->idle_active = 1;
@@ -206,18 +213,11 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-	ktime_t now;
 
 	if (!tick_nohz_enabled)
 		return -1;
 
-	now = ktime_get();
-	update_ts_time_stats(ts, now);
-
-	if (ts->idle_active)
-		*last_update_time = ktime_to_us(ts->idle_lastupdate);
-	else
-		*last_update_time = ktime_to_us(now);
+	update_ts_time_stats(ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->idle_sleeptime);
 }