@@ -166,6 +166,38 @@ static inline bool invalid_selector(u16 value)
 
 #define FLAG_MASK FLAG_MASK_32
 
+/*
+ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+ * when it traps. The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
+ *
+ * Now, if the stack is empty, '&regs->sp' is out of range. In this
+ * case we try to take the previous stack. To always return a non-null
+ * stack pointer we fall back to regs as stack if no previous stack
+ * exists.
+ *
+ * This is valid only for kernel mode traps.
+ */
+unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
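+	/* THREAD_SIZE-aligned base of the kernel stack that regs sits on. */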
+	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
+	unsigned long sp = (unsigned long)&regs->sp;
+	struct thread_info *tinfo;
+
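+	/* &regs->sp still lies on the same stack: the old stack starts there. */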
+	if (context == (sp & ~(THREAD_SIZE - 1)))
+		return sp;
+
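+	/* The stack was empty: &regs->sp is out of range, take the previous stack. */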
+	tinfo = (struct thread_info *)context;
+	if (tinfo->previous_esp)
+		return tinfo->previous_esp;
+
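+	/* No previous stack either: fall back to regs itself. */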
+	return (unsigned long)regs;
+}
+
 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
 {
 	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);