@@ -26,6 +26,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
 #include <linux/cpu.h>
+#include <linux/module.h>
 
 #include <asm/atomic.h>
 #include <asm/smp.h>
@@ -37,9 +38,16 @@
 #include <asm/i8253.h>
 
 #include <mach_apic.h>
+#include <mach_ipi.h>
 
 #include "io_ports.h"
 
+/*
+ * cpumask denoting the CPUs that need the timer interrupt delivered
+ * as IPIs in place of their local APIC timers
+ */
+static cpumask_t timer_bcast_ipi;
+
 /*
  * Knob to control our willingness to enable the local APIC.
  */
@@ -931,11 +939,16 @@ void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound;
 static void __setup_APIC_LVTT(unsigned int clocks)
 {
 	unsigned int lvtt_value, tmp_value, ver;
+	int cpu = smp_processor_id();
 
 	ver = GET_APIC_VERSION(apic_read(APIC_LVR));
 	lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
 	if (!APIC_INTEGRATED(ver))
 		lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
+
+	if (cpu_isset(cpu, timer_bcast_ipi))
+		lvtt_value |= APIC_LVT_MASKED;
+
 	apic_write_around(APIC_LVTT, lvtt_value);
 
 	/*
@@ -1068,7 +1081,7 @@ void __devinit setup_secondary_APIC_clock(void)
 	setup_APIC_timer(calibration_result);
 }
 
-void __devinit disable_APIC_timer(void)
+void disable_APIC_timer(void)
 {
 	if (using_apic_timer) {
 		unsigned long v;
@@ -1080,7 +1093,10 @@ void __devinit disable_APIC_timer(void)
 
 void enable_APIC_timer(void)
 {
-	if (using_apic_timer) {
+	int cpu = smp_processor_id();
+
+	if (using_apic_timer &&
+	    !cpu_isset(cpu, timer_bcast_ipi)) {
 		unsigned long v;
 
 		v = apic_read(APIC_LVTT);
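
The hunk context above elides the bodies of disable_APIC_timer() and enable_APIC_timer(); both amount to a read-modify-write of the LVTT entry. A minimal sketch of that idiom, consistent with the APIC_LVT_MASKED handling added to __setup_APIC_LVTT() above (illustrative helpers, not part of the patch):

	/*
	 * Illustrative only: APIC_LVT_MASKED suppresses the local timer
	 * interrupt while leaving the timer itself programmed, which is
	 * why enable_APIC_timer() must skip CPUs in timer_bcast_ipi --
	 * their LVTT entry is deliberately left masked.
	 */
	static void example_mask_lvtt(void)
	{
		unsigned long v = apic_read(APIC_LVTT);
		apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
	}

	static void example_unmask_lvtt(void)
	{
		unsigned long v = apic_read(APIC_LVTT);
		apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED);
	}
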
@@ -1088,6 +1104,32 @@ void enable_APIC_timer(void)
 	}
 }
 
+void switch_APIC_timer_to_ipi(void *cpumask)
+{
+	cpumask_t mask = *(cpumask_t *)cpumask;
+	int cpu = smp_processor_id();
+
+	if (cpu_isset(cpu, mask) &&
+	    !cpu_isset(cpu, timer_bcast_ipi)) {
+		disable_APIC_timer();
+		cpu_set(cpu, timer_bcast_ipi);
+	}
+}
+EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
+
+void switch_ipi_to_APIC_timer(void *cpumask)
+{
+	cpumask_t mask = *(cpumask_t *)cpumask;
+	int cpu = smp_processor_id();
+
+	if (cpu_isset(cpu, mask) &&
+	    cpu_isset(cpu, timer_bcast_ipi)) {
+		cpu_clear(cpu, timer_bcast_ipi);
+		enable_APIC_timer();
+	}
+}
+EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
+
 #undef APIC_DIVISOR
 
 /*
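
switch_APIC_timer_to_ipi() and switch_ipi_to_APIC_timer() act only on the CPU they run on (note the smp_processor_id() test against the passed-in mask), so a caller has to run them on every CPU it wants switched. A sketch of such a call site, assuming this era's four-argument on_each_cpu(func, info, retry, wait); enter_deep_sleep() is hypothetical:

	/*
	 * Sketch of a caller, not part of this patch: before entering a
	 * state in which the local APIC timer stops, route the tick via
	 * broadcast IPI; restore the local timer on the way out.
	 */
	static void example_idle_with_ipi_tick(void)
	{
		cpumask_t mask = cpumask_of_cpu(smp_processor_id());

		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
		enter_deep_sleep();		/* hypothetical */
		on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
	}
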
@@ -1152,6 +1194,38 @@ fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
 	irq_exit();
 }
 
+#ifndef CONFIG_SMP
+static void up_apic_timer_interrupt_call(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+
+	/*
+	 * the NMI deadlock-detector uses this count.
+	 */
+	per_cpu(irq_stat, cpu).apic_timer_irqs++;
+
+	smp_local_timer_interrupt(regs);
+}
+#endif
+
+void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
+{
+	cpumask_t mask;
+
+	cpus_and(mask, cpu_online_map, timer_bcast_ipi);
+	if (!cpus_empty(mask)) {
+#ifdef CONFIG_SMP
+		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+#else
+		/*
+		 * On UP we can call the APIC timer interrupt handler
+		 * directly, minus all the irq entry/exit work.
+		 */
+		up_apic_timer_interrupt_call(regs);
+#endif
+	}
+}
+
 int setup_profiling_timer(unsigned int multiplier)
 {
 	return -EINVAL;
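
Nothing in this hunk calls smp_send_timer_broadcast_ipi(); it is meant to be driven from the global timer tick so that CPUs sitting in timer_bcast_ipi still receive LOCAL_TIMER_VECTOR. A sketch of that hook; the surrounding handler shape is an assumption, not shown by this patch:

	/*
	 * Sketch, not part of this patch: the global (PIT/HPET) timer
	 * interrupt fans the tick out to CPUs whose LVTT is masked.
	 * The handler shape is assumed for illustration.
	 */
	static void example_global_tick(struct pt_regs *regs)
	{
		do_timer(regs);
		/* Deliver the tick by IPI to CPUs in timer_bcast_ipi. */
		smp_send_timer_broadcast_ipi(regs);
	}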