/* fault.c */
/*
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm-generic/sections.h>

/*
 * Page fault error code bits
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
#define PF_PROT		(1<<0)
#define PF_WRITE	(1<<1)
#define PF_USER		(1<<2)
#define PF_RSVD		(1<<3)
#define PF_INSTR	(1<<4)
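/*
 * For example, a user-mode write to a present but read-only page
 * faults with error_code == (PF_PROT|PF_WRITE|PF_USER) == 0x7,
 * while the first user-mode read of a not-yet-mapped page faults
 * with error_code == PF_USER == 0x4.
 */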
static inline int notify_page_fault(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
#ifdef CONFIG_X86_32
	if (!user_mode_vm(regs)) {
#else
	if (!user_mode(regs)) {
#endif
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
#else
	return 0;
#endif
}
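/*
 * notify_page_fault() gives kprobes first claim on the fault: if a
 * registered probe's fault handler consumes it (trap 14),
 * do_page_fault() returns immediately. Preemption is disabled around
 * the check because kprobe_running() relies on smp_processor_id().
 */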
/*
 * X86_32
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * X86_64
 * Sometimes the CPU reports invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner
 */
static int is_prefetch(struct pt_regs *regs, unsigned long addr,
		       unsigned long error_code)
{
	unsigned char *instr;
	int scan_more = 1;
	int prefetch = 0;
	unsigned char *max_instr;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (unsigned char *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (scan_more && instr < max_instr) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/*
			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
			 * In X86_64 long mode, the CPU will signal invalid
			 * opcode if some of these prefixes are present so
			 * X86_64 will never get here anyway
			 */
			scan_more = ((instr_lo & 7) == 0x6);
			break;
#ifdef CONFIG_X86_64
		case 0x40:
			/*
			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
			 * Need to figure out under what instruction mode the
			 * instruction was issued. Could check the LDT for lm,
			 * but for now it's good enough to assume that long
			 * mode only uses well known segments or kernel.
			 */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;
#endif
		case 0x60:
			/* 0x64 through 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;

			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}
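/*
 * For example, the AMD 3DNow! "prefetch (%eax)" instruction encodes
 * as 0F 0D 00: the scanner sees the opcode byte 0x0F (instr_hi ==
 * 0x00, instr_lo == 0xF), fetches the following byte 0x0D and
 * reports a prefetch. "prefetchnta (%eax)" (0F 18 00) is matched
 * the same way via the 0x18 second byte.
 */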
static void force_sig_info_fault(int si_signo, int si_code,
				 unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}
#ifdef CONFIG_X86_64
static int bad_address(void *p)
{
	unsigned long dummy;
	return probe_kernel_address((unsigned long *)p, dummy);
}
#endif
static void dump_pagetable(unsigned long address)
{
#ifdef CONFIG_X86_32
	__typeof__(pte_val(__pte(0))) page;

	page = read_cr3();
	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", page);
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
							 & (PTRS_PER_PMD - 1)];
		printk(KERN_CONT "*pde = %016Lx ", page);
		page &= ~_PAGE_NX;
	}
#else
	printk("*pde = %08lx ", page);
#endif

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already.
	 */
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && (page & _PAGE_PRESENT)
	    && !(page & _PAGE_PSE)) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
							 & (PTRS_PER_PTE - 1)];
		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
	}

	printk("\n");
#else /* CONFIG_X86_64 */
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = (pgd_t *)read_cr3();

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
	pgd += pgd_index(address);
	if (bad_address(pgd)) goto bad;
	printk("PGD %lx ", pgd_val(*pgd));
	if (!pgd_present(*pgd)) goto ret;

	pud = pud_offset(pgd, address);
	if (bad_address(pud)) goto bad;
	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto ret;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd)) goto bad;
	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte)) goto bad;
	printk("PTE %lx", pte_val(*pte));
ret:
	printk("\n");
	return;
bad:
	printk("BAD\n");
#endif
}
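/*
 * On 64-bit this prints a single line walking the hierarchy; a fault
 * on an unmapped kernel address might yield something of the shape
 * "PGD 203067 PUD 1e0067 PMD 0" (values illustrative only), since
 * the walk stops at the first non-present or huge entry.
 */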
#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	return pmd_k;
}
#endif
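/*
 * vmalloc_sync_one() is the 32-bit building block for both
 * vmalloc_fault() and vmalloc_sync_all() below: it copies one kernel
 * pmd entry from the init_mm reference page table into the given
 * pgd, so a task whose page directory predates a vmalloc() mapping
 * can resolve the fault without a global flush.
 */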
#ifdef CONFIG_X86_64
static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 * A lot of BIOSes that didn't get tested properly miss this.
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 * Try to work around it here.
 * Note we only handle faults in kernel here.
 * Does nothing for X86_32.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	static int warned;
	if (address != regs->ip)
		return 0;
	if ((address >> 32) != 0)
		return 0;
	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		if (!warned) {
			printk(errata93_warning);
			warned = 1;
		}
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}
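/*
 * Example: with kernel text mapped at 0xffffffff80100000, a buggy
 * SMM handler that clears the upper half of RIP would fault at
 * 0x0000000080100000. OR-ing the upper 32 bits back in recovers the
 * original text address, so regs->ip can be fixed up and execution
 * resumed there.
 */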
/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps to illegal
 * addresses >4GB. We catch this in the page fault handler because these
 * addresses are not reachable. Just detect this case and return. Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
	    (address >> 32))
		return 1;
#endif
	return 0;
}
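/*
 * A compat-mode process is recognized either by the well-known
 * __USER32_CS selector or by a code segment from the LDT (selector
 * TI bit, bit 2, set); any fault address above 4GB from such a
 * process must be the erratum, since 32-bit code cannot generate it.
 */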
void do_invalid_op(struct pt_regs *, unsigned long);

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}
static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
			    unsigned long address)
{
#ifdef CONFIG_X86_32
	if (!oops_may_print())
		return;
#endif

#ifdef CONFIG_X86_PAE
	if (error_code & PF_INSTR) {
		unsigned int level;
		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(KERN_CRIT "kernel tried to execute "
				"NX-protected page - exploit attempt? "
				"(uid: %d)\n", current->uid);
	}
#endif

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");
#ifdef CONFIG_X86_32
	printk(KERN_CONT " at %08lx\n", address);
#else
	printk(KERN_CONT " at %016lx\n", address);
#endif
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);
	dump_pagetable(address);
}
#ifdef CONFIG_X86_64
static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
				 unsigned long error_code)
{
	unsigned long flags = oops_begin();
	struct task_struct *tsk;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       current->comm, address);
	dump_pagetable(address);
	tsk = current;
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	if (__die("Bad pagetable", regs, error_code))
		regs = NULL;
	oops_end(flags, regs, SIGKILL);
}
#endif
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;
	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}
/*
 * Handle a spurious fault caused by a stale TLB entry. This allows
 * us to lazily refresh the TLB when increasing the permissions of a
 * kernel page (RO -> RW or NX -> X). Doing it eagerly is very
 * expensive since that implies doing a full cross-processor TLB
 * flush, even if no stale TLB entries exist on other processors.
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static int spurious_fault(unsigned long address,
			  unsigned long error_code)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	return spurious_fault_check(error_code, pte);
}
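/*
 * Example of the scenario this handles: one CPU upgrades a kernel
 * mapping from RO to RW with set_pte() but skips the cross-CPU TLB
 * flush; another CPU that still caches the RO entry then writes to
 * the page and faults. The walk above finds the now-writable pte,
 * spurious_fault_check() confirms the access is permitted, and the
 * fault is simply ignored so the access is retried with the
 * up-to-date entry.
 */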
/*
 * X86_32
 * Handle a fault on the vmalloc or module mapping area
 *
 * X86_64
 * Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static int vmalloc_fault(unsigned long address)
{
#ifdef CONFIG_X86_32
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
#else
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush.
	 */
	pgd = pgd_offset(current->mm ?: &init_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;
	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared.
	 */
	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;
	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();
	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;
	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();
	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;
	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that.
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();
	return 0;
#endif
}
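/*
 * Typical sequence: a driver vmalloc()s a buffer, which installs
 * entries only in the init_mm reference tables; the first access
 * from a task whose page directory was created earlier faults here,
 * and the missing top-level entry is copied over (32-bit: the pmd
 * via vmalloc_sync_one(); 64-bit: the pgd entry), after which the
 * access is retried.
 */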
int show_unhandled_signals = 1;

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;
	int fault;
#ifdef CONFIG_X86_64
	unsigned long flags;
#endif

	/*
	 * We can fault from pretty much anywhere, with unknown IRQ state.
	 */
	trace_hardirqs_fixup();

	tsk = current;
	mm = tsk->mm;
	prefetchw(&mm->mmap_sem);

	/* get the address */
	address = read_cr2();

	si_code = SEGV_MAPERR;
	if (notify_page_fault(regs))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
#ifdef CONFIG_X86_32
	if (unlikely(address >= TASK_SIZE)) {
#else
	if (unlikely(address >= TASK_SIZE64)) {
#endif
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;

		/* Can handle a stale RO->RW TLB */
		if (spurious_fault(address, error_code))
			return;

		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}
#ifdef CONFIG_X86_32
	/*
	 * It's safe to allow irq's after cr2 has been saved and the vmalloc
	 * fault has been handled.
	 */
	if (regs->flags & (X86_EFLAGS_IF | X86_VM_MASK))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;
#else /* CONFIG_X86_64 */
	if (likely(regs->flags & X86_EFLAGS_IF))
		local_irq_enable();

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(address, regs, error_code);

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (unlikely(in_atomic() || !mm))
		goto bad_area_nosemaphore;

	/*
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet.
	 */
	if (user_mode_vm(regs))
		error_code |= PF_USER;
again:
#endif
	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space; if we cannot, we then validate
	 * the source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (PF_PROT|PF_WRITE)) {
	default:	/* 3: write, present */
		/* fall through */
	case PF_WRITE:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case PF_PROT:		/* read, present */
		goto bad_area;
	case 0:			/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

#ifdef CONFIG_X86_32
survive:
#endif
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#ifdef CONFIG_X86_32
	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (v8086_mode(regs)) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
#endif
	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (is_errata100(regs, address))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk(
#ifdef CONFIG_X86_32
			"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
#else
			"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
#endif
			task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			tsk->comm, task_pid_nr(tsk), address, regs->ip,
			regs->sp, error_code);
			print_vma_addr(" in ", regs->ip);
			printk("\n");
		}

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

	if (is_f00f_bug(regs, address))
		return;
no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * X86_32
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 *
	 * X86_64
	 * Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
#ifdef CONFIG_X86_32
	bust_spinlocks(1);
#else
	flags = oops_begin();
#endif

	show_fault_oops(regs, error_code, address);

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;

#ifdef CONFIG_X86_32
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
#else
	if (__die("Oops", regs, error_code))
		regs = NULL;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, SIGKILL);
#endif
	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
#ifdef CONFIG_X86_32
		down_read(&mm->mmap_sem);
		goto survive;
#else
		goto again;
#endif
	}

	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & PF_USER)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & PF_USER))
		goto no_context;
#ifdef CONFIG_X86_32
	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;
#endif
	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

void vmalloc_sync_all(void)
{
#ifdef CONFIG_X86_32
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each_entry(page, &pgd_list, lru) {
				if (!vmalloc_sync_one(page_address(page),
						      address))
					break;
			}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#else /* CONFIG_X86_64 */
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = VMALLOC_START & PGDIR_MASK;
	unsigned long address;

	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			const pgd_t *pgd_ref = pgd_offset_k(address);
			unsigned long flags;
			struct page *page;

			if (pgd_none(*pgd_ref))
				continue;
			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each_entry(page, &pgd_list, lru) {
				pgd_t *pgd;
				pgd = (pgd_t *)page_address(page) + pgd_index(address);
				if (pgd_none(*pgd))
					set_pgd(pgd, *pgd_ref);
				else
					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
			}
			spin_unlock_irqrestore(&pgd_lock, flags);
			set_bit(pgd_index(address), insync);
		}
		if (address == start)
			start = address + PGDIR_SIZE;
	}
#endif
}
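/*
 * vmalloc_sync_all() eagerly propagates the kernel mappings that
 * vmalloc_fault() would otherwise copy lazily, one pgd at a time,
 * into every page directory on pgd_list. Code that must not fault
 * on vmalloc space (for example, handlers that run in contexts where
 * taking a page fault is unsafe) can call it once up front instead
 * of relying on the fault path.
 */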