@@ -619,28 +619,37 @@ END(syscall_badsys)
 27:;
 
 /*
- * Build the entry stubs and pointer table with
- * some assembler magic.
+ * Build the entry stubs and pointer table with some assembler magic.
+ * We pack 7 stubs into a single 32-byte chunk, which will fit in a
+ * single cache line on all modern x86 implementations.
  */
-.section .rodata,"a"
+.section .init.rodata,"a"
 ENTRY(interrupt)
 .text
-
+	.p2align 5
+	.p2align CONFIG_X86_L1_CACHE_SHIFT
 ENTRY(irq_entries_start)
 	RING0_INT_FRAME
-vector=0
-.rept NR_VECTORS
-	ALIGN
- .if vector
+vector=FIRST_EXTERNAL_VECTOR
+.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
+	.balign 32
+  .rept	7
+    .if vector < NR_VECTORS
+      .if vector <> FIRST_EXTERNAL_VECTOR
 	CFI_ADJUST_CFA_OFFSET -4
- .endif
-1:	pushl $~(vector)
+      .endif
+1:	pushl $(~vector+0x80)	/* Note: always in signed byte range */
 	CFI_ADJUST_CFA_OFFSET 4
-	jmp common_interrupt
- .previous
+      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
+	jmp 2f
+      .endif
+      .previous
 	.long 1b
- .text
+      .text
 vector=vector+1
+    .endif
+  .endr
+2:	jmp common_interrupt
 .endr
 END(irq_entries_start)
 
@@ -652,8 +661,9 @@ END(interrupt)
  * the CPU automatically disables interrupts when executing an IRQ vector,
  * so IRQ-flags tracing has to follow that:
  */
-	ALIGN
+	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
+	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
 	SAVE_ALL
 	TRACE_IRQS_OFF
 	movl %esp,%eax
@@ -678,65 +688,6 @@ ENDPROC(name)
 /* The include is where all of the SMP etc. interrupts come from */
 #include "entry_arch.h"
 
-KPROBE_ENTRY(page_fault)
-	RING0_EC_FRAME
-	pushl $do_page_fault
-	CFI_ADJUST_CFA_OFFSET 4
-	ALIGN
-error_code:
-	/* the function address is in %fs's slot on the stack */
-	pushl %es
-	CFI_ADJUST_CFA_OFFSET 4
-	/*CFI_REL_OFFSET es, 0*/
-	pushl %ds
-	CFI_ADJUST_CFA_OFFSET 4
-	/*CFI_REL_OFFSET ds, 0*/
-	pushl %eax
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET eax, 0
-	pushl %ebp
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ebp, 0
-	pushl %edi
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edi, 0
-	pushl %esi
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET esi, 0
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx, 0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx, 0
-	pushl %ebx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ebx, 0
-	cld
-	pushl %fs
-	CFI_ADJUST_CFA_OFFSET 4
-	/*CFI_REL_OFFSET fs, 0*/
-	movl $(__KERNEL_PERCPU), %ecx
-	movl %ecx, %fs
-	UNWIND_ESPFIX_STACK
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	/*CFI_REGISTER es, ecx*/
-	movl PT_FS(%esp), %edi		# get the function address
-	movl PT_ORIG_EAX(%esp), %edx	# get the error code
-	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
-	mov %ecx, PT_FS(%esp)
-	/*CFI_REL_OFFSET fs, ES*/
-	movl $(__USER_DS), %ecx
-	movl %ecx, %ds
-	movl %ecx, %es
-	TRACE_IRQS_OFF
-	movl %esp,%eax			# pt_regs pointer
-	call *%edi
-	jmp ret_from_exception
-	CFI_ENDPROC
-KPROBE_END(page_fault)
-
 ENTRY(coprocessor_error)
 	RING0_INT_FRAME
 	pushl $0
@@ -767,140 +718,6 @@ ENTRY(device_not_available)
 	CFI_ENDPROC
 END(device_not_available)
 
-/*
- * Debug traps and NMI can happen at the one SYSENTER instruction
- * that sets up the real kernel stack. Check here, since we can't
- * allow the wrong stack to be used.
- *
- * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
- * already pushed 3 words if it hits on the sysenter instruction:
- * eflags, cs and eip.
- *
- * We just load the right stack, and push the three (known) values
- * by hand onto the new stack - while updating the return eip past
- * the instruction that would have done it for sysenter.
- */
-#define FIX_STACK(offset, ok, label)		\
-	cmpw $__KERNEL_CS,4(%esp);		\
-	jne ok;					\
-label:						\
-	movl TSS_sysenter_sp0+offset(%esp),%esp;	\
-	CFI_DEF_CFA esp, 0;			\
-	CFI_UNDEFINED eip;			\
-	pushfl;					\
-	CFI_ADJUST_CFA_OFFSET 4;		\
-	pushl $__KERNEL_CS;			\
-	CFI_ADJUST_CFA_OFFSET 4;		\
-	pushl $sysenter_past_esp;		\
-	CFI_ADJUST_CFA_OFFSET 4;		\
-	CFI_REL_OFFSET eip, 0
-
-KPROBE_ENTRY(debug)
-	RING0_INT_FRAME
-	cmpl $ia32_sysenter_target,(%esp)
-	jne debug_stack_correct
-	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
-debug_stack_correct:
-	pushl $-1			# mark this as an int
-	CFI_ADJUST_CFA_OFFSET 4
-	SAVE_ALL
-	TRACE_IRQS_OFF
-	xorl %edx,%edx			# error code 0
-	movl %esp,%eax			# pt_regs pointer
-	call do_debug
-	jmp ret_from_exception
-	CFI_ENDPROC
-KPROBE_END(debug)
-
-/*
- * NMI is doubly nasty. It can happen _while_ we're handling
- * a debug fault, and the debug fault hasn't yet been able to
- * clear up the stack. So we first check whether we got an
- * NMI on the sysenter entry path, but after that we need to
- * check whether we got an NMI on the debug path where the debug
- * fault happened on the sysenter path.
- */
-KPROBE_ENTRY(nmi)
-	RING0_INT_FRAME
-	pushl %eax
-	CFI_ADJUST_CFA_OFFSET 4
-	movl %ss, %eax
-	cmpw $__ESPFIX_SS, %ax
-	popl %eax
-	CFI_ADJUST_CFA_OFFSET -4
-	je nmi_espfix_stack
-	cmpl $ia32_sysenter_target,(%esp)
-	je nmi_stack_fixup
-	pushl %eax
-	CFI_ADJUST_CFA_OFFSET 4
-	movl %esp,%eax
-	/* Do not access memory above the end of our stack page,
-	 * it might not exist.
-	 */
-	andl $(THREAD_SIZE-1),%eax
-	cmpl $(THREAD_SIZE-20),%eax
-	popl %eax
-	CFI_ADJUST_CFA_OFFSET -4
-	jae nmi_stack_correct
-	cmpl $ia32_sysenter_target,12(%esp)
-	je nmi_debug_stack_check
-nmi_stack_correct:
-	/* We have a RING0_INT_FRAME here */
-	pushl %eax
-	CFI_ADJUST_CFA_OFFSET 4
-	SAVE_ALL
-	TRACE_IRQS_OFF
-	xorl %edx,%edx		# zero error code
-	movl %esp,%eax		# pt_regs pointer
-	call do_nmi
-	jmp restore_nocheck_notrace
-	CFI_ENDPROC
-
-nmi_stack_fixup:
-	RING0_INT_FRAME
-	FIX_STACK(12,nmi_stack_correct, 1)
-	jmp nmi_stack_correct
-
-nmi_debug_stack_check:
-	/* We have a RING0_INT_FRAME here */
-	cmpw $__KERNEL_CS,16(%esp)
-	jne nmi_stack_correct
-	cmpl $debug,(%esp)
-	jb nmi_stack_correct
-	cmpl $debug_esp_fix_insn,(%esp)
-	ja nmi_stack_correct
-	FIX_STACK(24,nmi_stack_correct, 1)
-	jmp nmi_stack_correct
-
-nmi_espfix_stack:
-	/* We have a RING0_INT_FRAME here.
-	 *
-	 * create the pointer to lss back
-	 */
-	pushl %ss
-	CFI_ADJUST_CFA_OFFSET 4
-	pushl %esp
-	CFI_ADJUST_CFA_OFFSET 4
-	addw $4, (%esp)
-	/* copy the iret frame of 12 bytes */
-	.rept 3
-	pushl 16(%esp)
-	CFI_ADJUST_CFA_OFFSET 4
-	.endr
-	pushl %eax
-	CFI_ADJUST_CFA_OFFSET 4
-	SAVE_ALL
-	TRACE_IRQS_OFF
-	FIXUP_ESPFIX_STACK		# %eax == %esp
-	xorl %edx,%edx			# zero error code
-	call do_nmi
-	RESTORE_REGS
-	lss 12+4(%esp), %esp		# back to espfix stack
-	CFI_ADJUST_CFA_OFFSET -24
-	jmp irq_return
-	CFI_ENDPROC
-KPROBE_END(nmi)
-
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_iret)
 	iret
@@ -916,19 +733,6 @@ ENTRY(native_irq_enable_sysexit)
 END(native_irq_enable_sysexit)
 #endif
 
-KPROBE_ENTRY(int3)
-	RING0_INT_FRAME
-	pushl $-1			# mark this as an int
-	CFI_ADJUST_CFA_OFFSET 4
-	SAVE_ALL
-	TRACE_IRQS_OFF
-	xorl %edx,%edx		# zero error code
-	movl %esp,%eax		# pt_regs pointer
-	call do_int3
-	jmp ret_from_exception
-	CFI_ENDPROC
-KPROBE_END(int3)
-
 ENTRY(overflow)
 	RING0_INT_FRAME
 	pushl $0
@@ -993,14 +797,6 @@ ENTRY(stack_segment)
 	CFI_ENDPROC
 END(stack_segment)
 
-KPROBE_ENTRY(general_protection)
-	RING0_EC_FRAME
-	pushl $do_general_protection
-	CFI_ADJUST_CFA_OFFSET 4
-	jmp error_code
-	CFI_ENDPROC
-KPROBE_END(general_protection)
-
 ENTRY(alignment_check)
 	RING0_EC_FRAME
 	pushl $do_alignment_check
@@ -1211,3 +1007,227 @@ END(mcount)
 #include "syscall_table_32.S"
 
 syscall_table_size=(.-sys_call_table)
+
+/*
+ * Some functions should be protected against kprobes
+ */
+	.pushsection .kprobes.text, "ax"
+
+ENTRY(page_fault)
+	RING0_EC_FRAME
+	pushl $do_page_fault
+	CFI_ADJUST_CFA_OFFSET 4
+	ALIGN
+error_code:
+	/* the function address is in %fs's slot on the stack */
+	pushl %es
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET es, 0*/
+	pushl %ds
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET ds, 0*/
+	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET eax, 0
+	pushl %ebp
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebp, 0
+	pushl %edi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET edi, 0
+	pushl %esi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET esi, 0
+	pushl %edx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET edx, 0
+	pushl %ecx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ecx, 0
+	pushl %ebx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebx, 0
+	cld
+	pushl %fs
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET fs, 0*/
+	movl $(__KERNEL_PERCPU), %ecx
+	movl %ecx, %fs
+	UNWIND_ESPFIX_STACK
+	popl %ecx
+	CFI_ADJUST_CFA_OFFSET -4
+	/*CFI_REGISTER es, ecx*/
+	movl PT_FS(%esp), %edi		# get the function address
+	movl PT_ORIG_EAX(%esp), %edx	# get the error code
+	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
+	mov %ecx, PT_FS(%esp)
+	/*CFI_REL_OFFSET fs, ES*/
+	movl $(__USER_DS), %ecx
+	movl %ecx, %ds
+	movl %ecx, %es
+	TRACE_IRQS_OFF
+	movl %esp,%eax			# pt_regs pointer
+	call *%edi
+	jmp ret_from_exception
+	CFI_ENDPROC
+END(page_fault)
+
+/*
+ * Debug traps and NMI can happen at the one SYSENTER instruction
+ * that sets up the real kernel stack. Check here, since we can't
+ * allow the wrong stack to be used.
+ *
+ * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
+ * already pushed 3 words if it hits on the sysenter instruction:
+ * eflags, cs and eip.
+ *
+ * We just load the right stack, and push the three (known) values
+ * by hand onto the new stack - while updating the return eip past
+ * the instruction that would have done it for sysenter.
+ */
+#define FIX_STACK(offset, ok, label)		\
+	cmpw $__KERNEL_CS,4(%esp);		\
+	jne ok;					\
+label:						\
+	movl TSS_sysenter_sp0+offset(%esp),%esp;	\
+	CFI_DEF_CFA esp, 0;			\
+	CFI_UNDEFINED eip;			\
+	pushfl;					\
+	CFI_ADJUST_CFA_OFFSET 4;		\
+	pushl $__KERNEL_CS;			\
+	CFI_ADJUST_CFA_OFFSET 4;		\
+	pushl $sysenter_past_esp;		\
+	CFI_ADJUST_CFA_OFFSET 4;		\
+	CFI_REL_OFFSET eip, 0
+
+ENTRY(debug)
+	RING0_INT_FRAME
+	cmpl $ia32_sysenter_target,(%esp)
+	jne debug_stack_correct
+	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
+debug_stack_correct:
+	pushl $-1			# mark this as an int
+	CFI_ADJUST_CFA_OFFSET 4
+	SAVE_ALL
+	TRACE_IRQS_OFF
+	xorl %edx,%edx			# error code 0
+	movl %esp,%eax			# pt_regs pointer
+	call do_debug
+	jmp ret_from_exception
+	CFI_ENDPROC
+END(debug)
+
+/*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+ * a debug fault, and the debug fault hasn't yet been able to
+ * clear up the stack. So we first check whether we got an
+ * NMI on the sysenter entry path, but after that we need to
+ * check whether we got an NMI on the debug path where the debug
+ * fault happened on the sysenter path.
+ */
+ENTRY(nmi)
+	RING0_INT_FRAME
+	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	movl %ss, %eax
+	cmpw $__ESPFIX_SS, %ax
+	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4
+	je nmi_espfix_stack
+	cmpl $ia32_sysenter_target,(%esp)
+	je nmi_stack_fixup
+	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	movl %esp,%eax
+	/* Do not access memory above the end of our stack page,
+	 * it might not exist.
+	 */
+	andl $(THREAD_SIZE-1),%eax
+	cmpl $(THREAD_SIZE-20),%eax
+	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4
+	jae nmi_stack_correct
+	cmpl $ia32_sysenter_target,12(%esp)
+	je nmi_debug_stack_check
+nmi_stack_correct:
+	/* We have a RING0_INT_FRAME here */
+	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	SAVE_ALL
+	TRACE_IRQS_OFF
+	xorl %edx,%edx		# zero error code
+	movl %esp,%eax		# pt_regs pointer
+	call do_nmi
+	jmp restore_nocheck_notrace
+	CFI_ENDPROC
+
+nmi_stack_fixup:
+	RING0_INT_FRAME
+	FIX_STACK(12,nmi_stack_correct, 1)
+	jmp nmi_stack_correct
+
+nmi_debug_stack_check:
+	/* We have a RING0_INT_FRAME here */
+	cmpw $__KERNEL_CS,16(%esp)
+	jne nmi_stack_correct
+	cmpl $debug,(%esp)
+	jb nmi_stack_correct
+	cmpl $debug_esp_fix_insn,(%esp)
+	ja nmi_stack_correct
+	FIX_STACK(24,nmi_stack_correct, 1)
+	jmp nmi_stack_correct
+
+nmi_espfix_stack:
+	/* We have a RING0_INT_FRAME here.
+	 *
+	 * create the pointer to lss back
+	 */
+	pushl %ss
+	CFI_ADJUST_CFA_OFFSET 4
+	pushl %esp
+	CFI_ADJUST_CFA_OFFSET 4
+	addw $4, (%esp)
+	/* copy the iret frame of 12 bytes */
+	.rept 3
+	pushl 16(%esp)
+	CFI_ADJUST_CFA_OFFSET 4
+	.endr
+	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	SAVE_ALL
+	TRACE_IRQS_OFF
+	FIXUP_ESPFIX_STACK		# %eax == %esp
+	xorl %edx,%edx			# zero error code
+	call do_nmi
+	RESTORE_REGS
+	lss 12+4(%esp), %esp		# back to espfix stack
+	CFI_ADJUST_CFA_OFFSET -24
+	jmp irq_return
+	CFI_ENDPROC
+END(nmi)
+
+ENTRY(int3)
+	RING0_INT_FRAME
+	pushl $-1			# mark this as an int
+	CFI_ADJUST_CFA_OFFSET 4
+	SAVE_ALL
+	TRACE_IRQS_OFF
+	xorl %edx,%edx		# zero error code
+	movl %esp,%eax		# pt_regs pointer
+	call do_int3
+	jmp ret_from_exception
+	CFI_ENDPROC
+END(int3)
+
+ENTRY(general_protection)
+	RING0_EC_FRAME
+	pushl $do_general_protection
+	CFI_ADJUST_CFA_OFFSET 4
+	jmp error_code
+	CFI_ENDPROC
+END(general_protection)
+
+/*
+ * End of kprobes section
+ */
+	.popsection
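
A quick sanity check of the vector encoding used by the new stubs (a standalone
C sketch, not part of the patch; 0x20 and 256 stand in for
FIRST_EXTERNAL_VECTOR and NR_VECTORS): each stub pushes ~vector + 0x80, which
always fits in a signed byte so pushl can use its short imm8 form, and
common_interrupt then adds -0x80, leaving ~vector (a value in [-256,-1]) in
orig_eax for the handler to invert.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Walk every external vector and check the round trip. */
	for (int vector = 0x20; vector < 256; vector++) {
		int enc = ~vector + 0x80;			/* what the stub pushes */
		assert(enc >= -128 && enc <= 127);		/* fits a sign-extended imm8 */
		int32_t orig_eax = (int32_t)(int8_t)enc - 0x80;	/* addl $-0x80,(%esp) */
		assert(orig_eax == ~vector);			/* now in the [-256,-1] range */
		assert(~orig_eax == vector);			/* handler recovers the vector */
	}
	return 0;
}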