@@ -36,8 +36,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
 
 extern struct desc_ptr idt_descr;
 extern gate_desc idt_table[];
-extern struct desc_ptr nmi_idt_descr;
-extern gate_desc nmi_idt_table[];
+extern struct desc_ptr debug_idt_descr;
+extern gate_desc debug_idt_table[];
 
 struct gdt_page {
 	struct desc_struct gdt[GDT_ENTRIES];
@@ -316,7 +316,7 @@ static inline void set_nmi_gate(int gate, void *addr)
 	gate_desc s;
 
 	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
-	write_idt_entry(nmi_idt_table, gate, &s);
+	write_idt_entry(debug_idt_table, gate, &s);
 }
 #endif
 
@@ -405,4 +405,45 @@ static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
 	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
 }
 
+#ifdef CONFIG_X86_64
+DECLARE_PER_CPU(u32, debug_idt_ctr);
+static inline bool is_debug_idt_enabled(void)
+{
+	if (this_cpu_read(debug_idt_ctr))
+		return true;
+
+	return false;
+}
+
+static inline void load_debug_idt(void)
+{
+	load_idt((const struct desc_ptr *)&debug_idt_descr);
+}
+#else
+static inline bool is_debug_idt_enabled(void)
+{
+	return false;
+}
+
+static inline void load_debug_idt(void)
+{
+}
+#endif
+
+/*
+ * The load_current_idt() is called with interrupts disabled by local_irq_save()
+ * to avoid races. That way the IDT will always be set back to the expected
+ * descriptor.
+ */
+static inline void load_current_idt(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (is_debug_idt_enabled())
+		load_debug_idt();
+	else
+		load_idt((const struct desc_ptr *)&idt_descr);
+	local_irq_restore(flags);
+}
 #endif /* _ASM_X86_DESC_H */
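
For reference, a minimal sketch (outside the diff above) of how the new helper might be used, assuming a caller such as an NMI or debug-exception exit path where a special IDT may have been loaded; the wrapper function name here is hypothetical:

	/* Hypothetical caller; only load_current_idt() comes from the patch above. */
	static void example_restore_idt(void)
	{
		/*
		 * load_current_idt() checks debug_idt_ctr with interrupts
		 * disabled and reloads either debug_idt_descr or idt_descr,
		 * so the CPU ends up on whichever IDT is currently expected.
		 */
		load_current_idt();
	}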