author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-20 12:21:20 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-05-20 12:43:33 +0200
commit		26b119bc811a73bac6ecf95bdf284bf31c7955f0
tree		cc8b39284012c27c4014c253655cff7ee95f79ce /kernel
parent		d7b629a34fc4134a43c730b5f0197855dc4948d0
perf_counter: Log irq_period changes
For the dynamic irq_period code, log whenever we change the period so
that analyzing code can normalize the event flow.

[ Impact: add new feature to allow more precise profiling ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090520102553.298769743@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
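The record written by perf_log_period() below is a perf_event_header
followed by a timestamp and the new period. As a rough user-space sketch
of the normalization this enables (not part of the patch: the structs
mirror the record layout, but the sample data and arithmetic are made-up
stand-ins for a real ring-buffer parser), a profiler can weight the
overflow interrupts seen in each interval by the period in effect:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's struct perf_event_header. */
struct perf_event_header {
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

/* Layout of the PERF_EVENT_PERIOD record written by perf_log_period(). */
struct period_event {
	struct perf_event_header header;
	uint64_t time;
	uint64_t period;
};

int main(void)
{
	/*
	 * Made-up stand-ins for records parsed out of the counter's
	 * mmap()ed output buffer: two period changes, and the number of
	 * overflow interrupts observed while each period was active.
	 */
	struct period_event changes[2] = {
		{ .time = 1000, .period = 10000 },
		{ .time = 2000, .period = 40000 },
	};
	uint64_t interrupts[2] = { 50, 12 };
	uint64_t total = 0;

	/*
	 * Each overflow interrupt stands for `period` raw events, so
	 * normalizing the event flow means weighting interrupt counts
	 * by the period that was in effect at the time.
	 */
	for (int i = 0; i < 2; i++)
		total += interrupts[i] * changes[i].period;

	printf("estimated raw events: %llu\n", (unsigned long long)total);
	return 0;
}

A real consumer would pull these records out of the mmap()ed buffer in
timestamp order, interleaved with the sample events they apply to.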
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/perf_counter.c	40
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 64113e6d1942..db02eb16c777 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1046,7 +1046,9 @@ int perf_counter_task_enable(void)
 	return 0;
 }
 
-void perf_adjust_freq(struct perf_counter_context *ctx)
+static void perf_log_period(struct perf_counter *counter, u64 period);
+
+static void perf_adjust_freq(struct perf_counter_context *ctx)
 {
 	struct perf_counter *counter;
 	u64 irq_period;
@@ -1072,6 +1074,8 @@ void perf_adjust_freq(struct perf_counter_context *ctx)
 		if (!irq_period)
 			irq_period = 1;
 
+		perf_log_period(counter, irq_period);
+
 		counter->hw.irq_period = irq_period;
 		counter->hw.interrupts = 0;
 	}
@@ -2407,6 +2411,40 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 }
 
 /*
+ * Log irq_period changes so that analyzing tools can normalize the event flow.
+ */
+
+static void perf_log_period(struct perf_counter *counter, u64 period)
+{
+	struct perf_output_handle handle;
+	int ret;
+
+	struct {
+		struct perf_event_header	header;
+		u64				time;
+		u64				period;
+	} freq_event = {
+		.header = {
+			.type = PERF_EVENT_PERIOD,
+			.misc = 0,
+			.size = sizeof(freq_event),
+		},
+		.time = sched_clock(),
+		.period = period,
+	};
+
+	if (counter->hw.irq_period == period)
+		return;
+
+	ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
+	if (ret)
+		return;
+
+	perf_output_put(&handle, freq_event);
+	perf_output_end(&handle);
+}
+
+/*
  * Generic counter overflow handling.
  */