@@ -61,83 +61,6 @@ static inline int notify_page_fault(struct pt_regs *regs)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-/*
- * Return EIP plus the CS segment base. The segment limit is also
- * adjusted, clamped to the kernel/user address space (whichever is
- * appropriate), and returned in *eip_limit.
- *
- * The segment is checked, because it might have been changed by another
- * task between the original faulting instruction and here.
- *
- * If CS is no longer a valid code segment, or if EIP is beyond the
- * limit, or if it is a kernel address when CS is not a kernel segment,
- * then the returned value will be greater than *eip_limit.
- *
- * This is slow, but is very rarely executed.
- */
-static inline unsigned long get_segment_eip(struct pt_regs *regs,
-					    unsigned long *eip_limit)
-{
-	unsigned long ip = regs->ip;
-	unsigned seg = regs->cs & 0xffff;
-	u32 seg_ar, seg_limit, base, *desc;
-
-	/* Unlikely, but must come before segment checks. */
-	if (unlikely(regs->flags & VM_MASK)) {
-		base = seg << 4;
-		*eip_limit = base + 0xffff;
-		return base + (ip & 0xffff);
-	}
-
-	/* The standard kernel/user address space limit. */
-	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
-
-	/* By far the most common cases. */
-	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
-		return ip;
-
-	/* Check the segment exists, is within the current LDT/GDT size,
-	   that kernel/user (ring 0..3) has the appropriate privilege,
-	   that it's a code segment, and get the limit. */
-	__asm__ ("larl %3,%0; lsll %3,%1"
-		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
-	if ((~seg_ar & 0x9800) || ip > seg_limit) {
-		*eip_limit = 0;
-		return 1;	 /* So that returned ip > *eip_limit. */
-	}
-
-	/* Get the GDT/LDT descriptor base.
-	   When you look for races in this code remember that
-	   LDT and other horrors are only used in user space. */
-	if (seg & (1<<2)) {
-		/* Must lock the LDT while reading it. */
-		mutex_lock(&current->mm->context.lock);
-		desc = current->mm->context.ldt;
-		desc = (void *)desc + (seg & ~7);
-	} else {
-		/* Must disable preemption while reading the GDT. */
-		desc = (u32 *)get_cpu_gdt_table(get_cpu());
-		desc = (void *)desc + (seg & ~7);
-	}
-
-	/* Decode the code segment base from the descriptor */
-	base = get_desc_base((struct desc_struct *)desc);
-
-	if (seg & (1<<2))
-		mutex_unlock(&current->mm->context.lock);
-	else
-		put_cpu();
-
-	/* Adjust EIP and segment limit, and clamp at the kernel limit.
-	   It's legitimate for segments to wrap at 0xffffffff. */
-	seg_limit += base;
-	if (seg_limit < *eip_limit && seg_limit >= base)
-		*eip_limit = seg_limit;
-	return ip + base;
-}
-#endif
-
 /*
  * X86_32
  * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.