author    David S. Miller <davem@davemloft.net>  2011-03-03 21:27:42 -0800
committer David S. Miller <davem@davemloft.net>  2011-03-03 21:27:42 -0800
commit    0a0e9ae1bd788bc19adc4d4ae08c98b233697402 (patch)
tree      13825eeb5bbeae27d66e95f12168eff4b60701ab /kernel/perf_event.c
parent    01a16b21d6adf992aa863186c3c4e561a57c1714 (diff)
parent    b65a0e0c84cf489bfa00d6aa6c48abc5a237100f (diff)
download  linux-rt-0a0e9ae1bd788bc19adc4d4ae08c98b233697402.tar.gz
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts: drivers/net/bnx2x/bnx2x.h
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--  kernel/perf_event.c | 19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 999835b6112b..656222fcf767 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -782,6 +782,10 @@ retry:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
 	event->oncpu = smp_processor_id();
+
+	/*
+	 * Unthrottle events, since we scheduled we might have missed several
+	 * ticks already, also for a heavily scheduling task there is little
+	 * guarantee it'll get a tick in a timely manner.
+	 */
+	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+		perf_log_throttle(event, 1);
+		event->hw.interrupts = 0;
+	}
+
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
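
The hunk above is the perf_event.c change this merge carries: event_sched_in() now clears the MAX_INTERRUPTS sentinel and logs an unthrottle as soon as a throttled event is scheduled back in, rather than waiting for a later tick. Below is a minimal userspace sketch of that sentinel pattern; the struct and function names are illustrative stand-ins, and only the MAX_INTERRUPTS definition mirrors the kernel's.

/* Build with: gcc -Wall sketch.c */
#include <stdio.h>

#define MAX_INTERRUPTS (~0ULL)

struct sketch_event {
	unsigned long long interrupts;	/* stands in for event->hw.interrupts */
};

/* Stand-in for perf_log_throttle(): just report the state change. */
static void sketch_log_throttle(struct sketch_event *event, int enable)
{
	printf("event %p %s\n", (void *)event,
	       enable ? "unthrottled" : "throttled");
}

/* Mirrors the check the patch adds to event_sched_in(). */
static void sketch_sched_in(struct sketch_event *event)
{
	if (event->interrupts == MAX_INTERRUPTS) {
		sketch_log_throttle(event, 1);
		event->interrupts = 0;
	}
	/* ... mark the event active and program the hardware ... */
}

int main(void)
{
	struct sketch_event ev = { .interrupts = MAX_INTERRUPTS };

	sketch_sched_in(&ev);	/* logs the unthrottle and resets the count */
	return 0;
}

The remaining hunk below removes the define and the perf_log_throttle() forward declaration from their old location, now that they appear above event_sched_in().
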
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	}
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
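
The trailing context of the last hunk shows perf_calculate_period(), which turns a requested sample_freq into a hardware sampling period from the count observed over the elapsed nanoseconds, roughly period = count * 10^9 / (nsec * sample_freq). The sketch below shows only that arithmetic with made-up names; the kernel version additionally scales the operands to keep the division within 64 bits.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Illustrative only: events-per-sample period for a target sample rate. */
static uint64_t sketch_calc_period(uint64_t freq, uint64_t nsec, uint64_t count)
{
	if (freq == 0 || nsec == 0)
		return 0;
	/*
	 * The observed rate is count * NSEC_PER_SEC / nsec events per
	 * second; dividing that by the requested frequency gives how
	 * many events should elapse between samples.
	 */
	return (count * NSEC_PER_SEC) / (freq * nsec);
}

int main(void)
{
	/* 4,000,000 events in 10 ms at 1000 samples/sec -> period 400000 */
	printf("%llu\n", (unsigned long long)
	       sketch_calc_period(1000, 10 * 1000 * 1000ULL, 4 * 1000 * 1000ULL));
	return 0;
}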