|
@@ -775,6 +775,69 @@ void do_softirq(void)
|
|
|
local_irq_restore(flags);
|
|
|
}
|
|
|
|
|
|
/*
 * Default performance-counter IRQ handler, installed while no real
 * handler is registered.  A perf counter interrupt arriving here is
 * unexpected: dump the counter control/state registers and zero the
 * PCR (presumably this stops the counters so the spurious IRQ does
 * not repeat -- confirm against the UltraSPARC PCR spec).
 */
static void unhandled_perf_irq(struct pt_regs *regs)
{
	unsigned long pcr, pic;

	/* Snapshot control (PCR) and count (PIC) registers for the log. */
	read_pcr(pcr);
	read_pic(pic);

	write_pcr(0);

	printk(KERN_EMERG "CPU %d: Got unexpected perf counter IRQ.\n",
	       smp_processor_id());
	printk(KERN_EMERG "CPU %d: PCR[%016lx] PIC[%016lx]\n",
	       smp_processor_id(), pcr, pic);
}
/* Almost a direct copy of the powerpc PMC code. */
/* perf_irq_lock serializes register/release of the perf IRQ handler;
 * the dispatch path (perfctr_irq) reads perf_irq without taking it. */
static DEFINE_SPINLOCK(perf_irq_lock);
static void *perf_irq_owner_caller; /* mostly for debugging */
/* Currently registered handler; defaults to the "unexpected IRQ" stub. */
static void (*perf_irq)(struct pt_regs *regs) = unhandled_perf_irq;
/* Invoked from level 15 PIL handler in trap table. */
void perfctr_irq(int irq, struct pt_regs *regs)
{
	/* Ack the pending softint bit first, then dispatch to whatever
	 * handler is currently registered (unhandled_perf_irq if none). */
	clear_softint(1 << irq);
	perf_irq(regs);
}
+int register_perfctr_intr(void (*handler)(struct pt_regs *))
|
|
|
+{
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ if (!handler)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ spin_lock(&perf_irq_lock);
|
|
|
+ if (perf_irq != unhandled_perf_irq) {
|
|
|
+ printk(KERN_WARNING "register_perfctr_intr: "
|
|
|
+ "perf IRQ busy (reserved by caller %p)\n",
|
|
|
+ perf_irq_owner_caller);
|
|
|
+ ret = -EBUSY;
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
+
|
|
|
+ perf_irq_owner_caller = __builtin_return_address(0);
|
|
|
+ perf_irq = handler;
|
|
|
+
|
|
|
+ ret = 0;
|
|
|
+out:
|
|
|
+ spin_unlock(&perf_irq_lock);
|
|
|
+
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(register_perfctr_intr);
|
|
|
+
|
|
|
+void release_perfctr_intr(void (*handler)(struct pt_regs *))
|
|
|
+{
|
|
|
+ spin_lock(&perf_irq_lock);
|
|
|
+ perf_irq_owner_caller = NULL;
|
|
|
+ perf_irq = unhandled_perf_irq;
|
|
|
+ spin_unlock(&perf_irq_lock);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(release_perfctr_intr);
|
|
|
+
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
|
void fixup_irqs(void)
|
|
|
{
|