@@ -52,6 +52,7 @@
 #include <asm/mce.h>
 #include <asm/kvm_para.h>
 #include <asm/tsc.h>
+#include <asm/atomic.h>
 
 unsigned int num_processors;
 
@@ -370,24 +371,88 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
 }
 
 /*
- * Setup extended LVT, AMD specific (K8, family 10h)
+ * Setup extended LVT, AMD specific
  *
- * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
- * MCE interrupts are supported. Thus MCE offset must be set to 0.
+ * Software should use the LVT offsets the BIOS provides. The offsets
+ * are determined by the subsystems using them, such as those for MCE
+ * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts
+ * are supported. Beginning with family 10h at least 4 offsets are
+ * available.
  *
- * If mask=1, the LVT entry does not generate interrupts while mask=0
- * enables the vector. See also the BKDGs.
+ * Since the offsets must be consistent for all cores, we keep track
+ * of the LVT offsets in software and reserve each offset so that the
+ * same vector is also used on the other cores. An offset is freed by
+ * setting the entry to APIC_EILVT_MASKED.
+ *
+ * If the BIOS is right, there should be no conflicts. Otherwise a
+ * "[Firmware Bug]: ..." error message is generated. However, if
+ * software does not properly determine the offsets, it is not
+ * necessarily a BIOS bug.
  */
 
 #define APIC_EILVT_LVTOFF_MCE 0
 #define APIC_EILVT_LVTOFF_IBS 1
 
-static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
+static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];
+
+static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
+{
+	return (old & APIC_EILVT_MASKED)
+		|| (new == APIC_EILVT_MASKED)
+		|| ((new & ~APIC_EILVT_MASKED) == old);
+}
+
+static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
+{
+	unsigned int rsvd;		/* 0: uninitialized */
+
+	if (offset >= APIC_EILVT_NR_MAX)
+		return ~0;
+
+	rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED;
+	do {
+		if (rsvd &&
+		    !eilvt_entry_is_changeable(rsvd, new))
+			/* may not change if vectors are different */
+			return rsvd;
+		rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
+	} while (rsvd != new);
+
+	return new;
+}
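
For readers who want to poke at the reservation logic outside the kernel, here is a minimal user-space sketch of the same idea. It is an editor's illustration, not part of the patch: NR_OFFSETS, ENTRY_MASKED and the function names are invented, C11 atomics stand in for the kernel's atomic_t and atomic_cmpxchg(), and only the compare-and-swap agreement between "cores" mirrors reserve_eilvt_offset() above.

/* Editor's sketch: user-space model of the per-offset vector reservation. */
#include <stdatomic.h>
#include <stdio.h>

#define NR_OFFSETS	4		/* stands in for APIC_EILVT_NR_MAX */
#define ENTRY_MASKED	0x10000		/* stands in for APIC_EILVT_MASKED */

static _Atomic unsigned int offsets[NR_OFFSETS];	/* 0: nothing reserved yet */

static int entry_is_changeable(unsigned int old, unsigned int new)
{
	return (old & ENTRY_MASKED)
		|| (new == ENTRY_MASKED)
		|| ((new & ~ENTRY_MASKED) == old);
}

/* Returns the value that ends up reserved for this offset. */
static unsigned int reserve_offset(int offset, unsigned int new)
{
	unsigned int cur = 0;		/* expected value; 0 means "free" */

	for (;;) {
		/* Claim the slot if it still holds 'cur'; on failure 'cur'
		 * is updated to whatever another caller stored meanwhile. */
		if (atomic_compare_exchange_strong(&offsets[offset], &cur, new))
			return new;
		if (cur == new)
			return new;	/* same vector already reserved, agree */
		if (!entry_is_changeable(cur, new))
			return cur;	/* conflict: report what is reserved */
		/* the existing entry is masked, so it may be overwritten: retry */
	}
}

int main(void)
{
	unsigned int val = (4u << 8) | 0;	/* arbitrary example value */

	printf("first caller : 0x%x\n", reserve_offset(1, val));	/* reserves */
	printf("same vector  : 0x%x\n", reserve_offset(1, val));	/* agrees */
	printf("other vector : 0x%x\n", reserve_offset(1, val | 1));	/* conflict */
	return 0;
}

A conflicting caller gets back the value that is already reserved, which is exactly what setup_APIC_eilvt() below checks for before it touches the register.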
+
+/*
+ * If mask=1, the LVT entry does not generate interrupts while mask=0
+ * enables the vector. See also the BKDGs.
+ */
+
+static int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
 {
-	unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0);
-	unsigned int v = (mask << 16) | (msg_type << 8) | vector;
+	unsigned long reg = APIC_EILVTn(offset);
+	unsigned int new, old, reserved;
+
+	new = (mask << 16) | (msg_type << 8) | vector;
+	old = apic_read(reg);
+	reserved = reserve_eilvt_offset(offset, new);
 
-	apic_write(reg, v);
+	if (reserved != new) {
+		pr_err(FW_BUG "cpu %d, try to setup vector 0x%x, but "
+		       "vector 0x%x was already reserved by another core, "
+		       "APIC%lX=0x%x\n",
+		       smp_processor_id(), new, reserved, reg, old);
+		return -EINVAL;
+	}
+
+	if (!eilvt_entry_is_changeable(old, new)) {
+		pr_err(FW_BUG "cpu %d, try to setup vector 0x%x but "
+		       "register already in use, APIC%lX=0x%x\n",
+		       smp_processor_id(), new, reg, old);
+		return -EBUSY;
+	}
+
+	apic_write(reg, new);
+
+	return 0;
 }
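
As a side note on the (mask << 16) | (msg_type << 8) | vector encoding used above: the extended LVT entries follow the usual local APIC LVT layout, with the vector in bits 7:0, the message (delivery) type in bits 10:8 and the mask bit at bit 16. The stand-alone decoder below is an editor's sketch for illustration only; the struct and function names are invented, and the BKDG remains the authoritative reference for the exact bit positions.

/* Editor's sketch: decode a value built as (mask << 16) | (msg_type << 8) | vector. */
#include <stdio.h>

struct eilvt_fields {
	unsigned int vector;	/* bits  7:0 - interrupt vector */
	unsigned int msg_type;	/* bits 10:8 - message/delivery type */
	unsigned int mask;	/* bit  16   - 1: masked, generates no interrupts */
};

static struct eilvt_fields eilvt_decode(unsigned int val)
{
	struct eilvt_fields f = {
		.vector   = val & 0xff,
		.msg_type = (val >> 8) & 0x7,
		.mask     = (val >> 16) & 0x1,
	};
	return f;
}

int main(void)
{
	unsigned int val = (1u << 16) | (4u << 8) | 0;	/* a masked NMI-type entry */
	struct eilvt_fields f = eilvt_decode(val);

	printf("vector=%u msg_type=%u mask=%u\n", f.vector, f.msg_type, f.mask);
	return 0;
}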
 
 u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)