/*
 * linux/arch/i386/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>

extern void die(const char *,struct pt_regs *,long);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
	return 0;
}
#endif

/*
 * Return EIP plus the CS segment base.  The segment limit is also
 * adjusted, clamped to the kernel/user address space (whichever is
 * appropriate), and returned in *eip_limit.
 *
 * The segment is checked, because it might have been changed by another
 * task between the original faulting instruction and here.
 *
 * If CS is no longer a valid code segment, or if EIP is beyond the
 * limit, or if it is a kernel address when CS is not a kernel segment,
 * then the returned value will be greater than *eip_limit.
 *
 * This is slow, but is very rarely executed.
 */
static inline unsigned long get_segment_eip(struct pt_regs *regs,
					    unsigned long *eip_limit)
{
	unsigned long ip = regs->ip;
	unsigned seg = regs->cs & 0xffff;
	u32 seg_ar, seg_limit, base, *desc;

	/* Unlikely, but must come before segment checks. */
	if (unlikely(regs->flags & VM_MASK)) {
		base = seg << 4;
		*eip_limit = base + 0xffff;
		return base + (ip & 0xffff);
	}

	/* The standard kernel/user address space limit. */
	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;

	/* By far the most common cases. */
	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
		return ip;

	/* Check the segment exists, is within the current LDT/GDT size,
	   that kernel/user (ring 0..3) has the appropriate privilege,
	   that it's a code segment, and get the limit. */
	__asm__ ("larl %3,%0; lsll %3,%1"
		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
	if ((~seg_ar & 0x9800) || ip > seg_limit) {
		*eip_limit = 0;
		return 1;	 /* So that returned ip > *eip_limit. */
	}

	/* Get the GDT/LDT descriptor base.
	   When you look for races in this code remember that
	   LDT and other horrors are only used in user space. */
	if (seg & (1<<2)) {
		/* Must lock the LDT while reading it. */
		mutex_lock(&current->mm->context.lock);
		desc = current->mm->context.ldt;
		desc = (void *)desc + (seg & ~7);
	} else {
		/* Must disable preemption while reading the GDT. */
		desc = (u32 *)get_cpu_gdt_table(get_cpu());
		desc = (void *)desc + (seg & ~7);
	}

	/* Decode the code segment base from the descriptor */
	base = get_desc_base((struct desc_struct *)desc);

	if (seg & (1<<2)) {
		mutex_unlock(&current->mm->context.lock);
	} else
		put_cpu();

	/* Adjust EIP and segment limit, and clamp at the kernel limit.
	   It's legitimate for segments to wrap at 0xffffffff. */
	seg_limit += base;
	if (seg_limit < *eip_limit && seg_limit >= base)
		*eip_limit = seg_limit;
	return ip + base;
}

/*
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 */
static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
{
	unsigned long limit;
	unsigned char *instr = (unsigned char *)get_segment_eip(regs, &limit);
	int scan_more = 1;
	int prefetch = 0;
	int i;
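
	/* An x86 instruction is never longer than 15 bytes, hence the bound. */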
	for (i = 0; scan_more && i < 15; i++) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (instr > (unsigned char *)limit)
			break;
		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
			scan_more = ((instr_lo & 7) == 0x6);
			break;

		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;
			if (instr > (unsigned char *)limit)
				break;
			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}

static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
			      unsigned long error_code)
{
	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6)) {
		/* Catch an obscure case of prefetch inside an NX page. */
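		/* (error_code bit 4 marks an instruction fetch, which a
		   data prefetch can never be, so this is a real fault) */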
		if (nx_enabled && (error_code & 16))
			return 0;
		return __is_prefetch(regs, addr);
	}
	return 0;
}

static noinline void force_sig_info_fault(int si_signo, int si_code,
					  unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

void do_invalid_op(struct pt_regs *, unsigned long);
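
/*
 * Copy the kernel pmd covering @address from the reference page table
 * (init_mm.pgd) into the page directory @pgd, so that @pgd picks up
 * vmalloc/module mappings created after it was instantiated.
 */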
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static inline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}

int show_unhandled_signals = 1;

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
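 *	(so, e.g., error_code == 6 is a user-mode write to a not-present page)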
 */
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long address;
	int write, si_code;
	int fault;

	/*
	 * We can fault from pretty much anywhere, with unknown IRQ state.
	 */
	trace_hardirqs_fixup();

	/* get the address */
	address = read_cr2();

	tsk = current;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(address >= TASK_SIZE)) {
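		/* 0xd == (4 | 9): exactly the two checks described above */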
		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	if (notify_page_fault(regs))
		return;

	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
	   fault has been handled. */
	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & 4) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & 4) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & 3) {
		default:	/* 3: write, present */
				/* fall through */
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case 1:		/* read, present */
			goto bad_area;
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
				goto bad_area;
	}

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
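	 * (the VGA window at 0xA0000; screen_bitmap keeps one bit
	 * per page of that window)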
	 */
	if (regs->flags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & 4) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk("%s%s[%d]: segfault at %08lx ip %08lx "
			    "sp %08lx error %lx\n",
			    task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			    tsk->comm, task_pid_nr(tsk), address, regs->ip,
			    regs->sp, error_code);
		}
		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
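		/* (force bit 0 on, so a kernel address is reported as a
		   protection fault whether or not it is actually mapped) */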
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
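	/*
	 * The workaround maps the IDT read-only, so the F00F lockup
	 * arrives here as a page fault on the IDT; entry 6 is the
	 * invalid-opcode vector, which is then delivered by hand.
	 */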
	if (boot_cpu_data.f00f_bug) {
		unsigned long nr;

		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return;
		}
	}
#endif

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	/*
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	if (oops_may_print()) {
		__typeof__(pte_val(__pte(0))) page;

#ifdef CONFIG_X86_PAE
		if (error_code & 16) {
			pte_t *pte = lookup_address(address);

			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
				printk(KERN_CRIT "kernel tried to execute "
					"NX-protected page - exploit attempt? "
					"(uid: %d)\n", current->uid);
		}
#endif
		if (address < PAGE_SIZE)
			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
					"pointer dereference");
		else
			printk(KERN_ALERT "BUG: unable to handle kernel paging"
					" request");
		printk(" at virtual address %08lx\n",address);
		printk(KERN_ALERT "printing ip: %08lx ", regs->ip);
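
		/*
		 * Walk the page tables by hand to show how far the
		 * translation of the faulting address gets.
		 */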
		page = read_cr3();
		page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
		printk("*pdpt = %016Lx ", page);
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
			                                         & (PTRS_PER_PMD - 1)];
			printk(KERN_CONT "*pde = %016Lx ", page);
			page &= ~_PAGE_NX;
		}
#else
		printk("*pde = %08lx ", page);
#endif

		/*
		 * We must not directly access the pte in the highpte
		 * case if the page table is located in highmem.
		 * And let's rather not kmap-atomic the pte, just in case
		 * it's allocated already.
		 */
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && (page & _PAGE_PRESENT)
		    && !(page & _PAGE_PSE)) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
			                                         & (PTRS_PER_PTE - 1)];
			printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
		}
		printk("\n");
	}

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & 4)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & 4))
		goto no_context;

	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;
	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

void vmalloc_sync_all(void)
{
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
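			/*
			 * vmalloc_sync_one() fails only when the reference
			 * page table itself has no entry here, and then it
			 * fails identically for every pgd, so a failure is
			 * only legitimate on the first list entry.
			 */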
			for (page = pgd_list; page; page =
					(struct page *)page->index)
				if (!vmalloc_sync_one(page_address(page),
								address)) {
					BUG_ON(page != pgd_list);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
}