-rw-r--r--   arch/i386/kernel/apic.c         78
-rw-r--r--   arch/i386/kernel/time.c          6
-rw-r--r--   drivers/acpi/processor_idle.c   15
-rw-r--r--   include/asm-i386/apic.h          5
4 files changed, 102 insertions, 2 deletions
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 2d8c6ce1ecda..acd3f1e34ca6 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -26,6 +26,7 @@
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
+#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/smp.h>
@@ -37,10 +38,17 @@
#include <asm/i8253.h>
#include <mach_apic.h>
+#include <mach_ipi.h>
#include "io_ports.h"
/*
+ * cpu_mask that denotes the CPUs that need timer interrupts delivered as
+ * IPIs in place of their local APIC timers
+ */
+static cpumask_t timer_bcast_ipi;
+
+/*
* Knob to control our willingness to enable the local APIC.
*/
int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
@@ -931,11 +939,16 @@ void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound;
static void __setup_APIC_LVTT(unsigned int clocks)
{
unsigned int lvtt_value, tmp_value, ver;
+ int cpu = smp_processor_id();
ver = GET_APIC_VERSION(apic_read(APIC_LVR));
lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
if (!APIC_INTEGRATED(ver))
lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
+
+ if (cpu_isset(cpu, timer_bcast_ipi))
+ lvtt_value |= APIC_LVT_MASKED;
+
apic_write_around(APIC_LVTT, lvtt_value);
/*
@@ -1068,7 +1081,7 @@ void __devinit setup_secondary_APIC_clock(void)
setup_APIC_timer(calibration_result);
}
-void __devinit disable_APIC_timer(void)
+void disable_APIC_timer(void)
{
if (using_apic_timer) {
unsigned long v;
@@ -1080,7 +1093,10 @@ void __devinit disable_APIC_timer(void)
void enable_APIC_timer(void)
{
- if (using_apic_timer) {
+ int cpu = smp_processor_id();
+
+ if (using_apic_timer &&
+ !cpu_isset(cpu, timer_bcast_ipi)) {
unsigned long v;
v = apic_read(APIC_LVTT);
@@ -1088,6 +1104,32 @@ void enable_APIC_timer(void)
}
}
+void switch_APIC_timer_to_ipi(void *cpumask)
+{
+ cpumask_t mask = *(cpumask_t *)cpumask;
+ int cpu = smp_processor_id();
+
+ if (cpu_isset(cpu, mask) &&
+ !cpu_isset(cpu, timer_bcast_ipi)) {
+ disable_APIC_timer();
+ cpu_set(cpu, timer_bcast_ipi);
+ }
+}
+EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
+
+void switch_ipi_to_APIC_timer(void *cpumask)
+{
+ cpumask_t mask = *(cpumask_t *)cpumask;
+ int cpu = smp_processor_id();
+
+ if (cpu_isset(cpu, mask) &&
+ cpu_isset(cpu, timer_bcast_ipi)) {
+ cpu_clear(cpu, timer_bcast_ipi);
+ enable_APIC_timer();
+ }
+}
+EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
+
#undef APIC_DIVISOR
/*
@@ -1152,6 +1194,38 @@ fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
irq_exit();
}
+#ifndef CONFIG_SMP
+static void up_apic_timer_interrupt_call(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ /*
+ * the NMI deadlock-detector uses this.
+ */
+ per_cpu(irq_stat, cpu).apic_timer_irqs++;
+
+ smp_local_timer_interrupt(regs);
+}
+#endif
+
+void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
+{
+ cpumask_t mask;
+
+ cpus_and(mask, cpu_online_map, timer_bcast_ipi);
+ if (!cpus_empty(mask)) {
+#ifdef CONFIG_SMP
+ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+#else
+ /*
+ * In the UP case we can call the APIC timer interrupt handler
+ * directly, minus the irq_enter()/irq_exit() bookkeeping.
+ */
+ up_apic_timer_interrupt_call(regs);
+#endif
+ }
+}
+
int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
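
Side note (not part of the patch): the two EXPORT_SYMBOL'd helpers above are meant to be run on every CPU with a cpumask_t argument, which is exactly how the processor_idle.c hunk below invokes them. A minimal sketch of that calling pattern, assuming the 4-argument on_each_cpu() of this kernel; the function name example_toggle_broadcast() is hypothetical and only illustrates the convention:

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/apic.h>

/* Hypothetical illustration mirroring drivers/acpi/processor_idle.c below;
 * "cpu" is the CPU whose local APIC timer stops in a deep C-state. */
static void example_toggle_broadcast(int cpu, int use_ipi)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (use_ipi)
		/* Mask the LVTT on "cpu" and set its bit in timer_bcast_ipi,
		 * so timer_interrupt() relays ticks to it as IPIs. */
		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
	else
		/* Clear the bit and re-enable the local APIC timer. */
		on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
}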
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 41c5b2dc6200..a14d594bfbeb 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -302,6 +302,12 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
do_timer_interrupt(irq, regs);
write_sequnlock(&xtime_lock);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (using_apic_timer)
+ smp_send_timer_broadcast_ipi(regs);
+#endif
+
return IRQ_HANDLED;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 807b0df308f1..cc049338e418 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -843,6 +843,15 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
unsigned int i;
unsigned int working = 0;
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+ struct cpuinfo_x86 *c = cpu_data + pr->id;
+ cpumask_t mask = cpumask_of_cpu(pr->id);
+
+ if (c->x86_vendor == X86_VENDOR_INTEL) {
+ on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+ }
+#endif
+
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
struct acpi_processor_cx *cx = &pr->power.states[i];
@@ -857,6 +866,12 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
case ACPI_STATE_C3:
acpi_processor_power_verify_c3(pr, cx);
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+ if (c->x86_vendor == X86_VENDOR_INTEL) {
+ on_each_cpu(switch_APIC_timer_to_ipi,
+ &mask, 1, 1);
+ }
+#endif
break;
}
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 8c454aa58ac6..d30b8571573f 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -132,6 +132,11 @@ extern unsigned int nmi_watchdog;
extern int disable_timer_pin_1;
+void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
+void switch_APIC_timer_to_ipi(void *cpumask);
+void switch_ipi_to_APIC_timer(void *cpumask);
+#define ARCH_APICTIMER_STOPS_ON_C3 1
+
#else /* !CONFIG_X86_LOCAL_APIC */
static inline void lapic_shutdown(void) { }