/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002  Andi Kleen, SuSE Labs.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm-generic/sections.h>

/*
 * Page fault error code bits
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
#define PF_PROT		(1<<0)
#define PF_WRITE	(1<<1)
#define PF_USER		(1<<2)
#define PF_RSVD		(1<<3)
#define PF_INSTR	(1<<4)
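
/*
 * Example decoding: a COW break arrives with error_code ==
 * (PF_PROT|PF_WRITE|PF_USER) == 0x7, i.e. a user-mode write to a page
 * that is present but write-protected.
 */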

static inline int notify_page_fault(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
#ifdef CONFIG_X86_32
	if (!user_mode_vm(regs)) {
#else
	if (!user_mode(regs)) {
#endif
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
#else
	return 0;
#endif
}
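
/*
 * The vector 14 passed to kprobe_fault_handler() above is the x86
 * page-fault trap number (#PF); it is the same value recorded in
 * tsk->thread.trap_no on the error paths later in this file.
 */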

/*
 * X86_32
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * X86_64
 * Sometimes the CPU reports invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner
 */
static int is_prefetch(struct pt_regs *regs, unsigned long addr,
		       unsigned long error_code)
{
	unsigned char *instr;
	int scan_more = 1;
	int prefetch = 0;
	unsigned char *max_instr;

#ifdef CONFIG_X86_32
	/* Catch an obscure case of prefetch inside an NX page: */
	if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
		return 0;
#endif

	/* If it was an exec fault on an NX page, ignore */
	if (error_code & PF_INSTR)
		return 0;

	instr = (unsigned char *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (scan_more && instr < max_instr) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/*
			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
			 * In X86_64 long mode, the CPU will signal invalid
			 * opcode if some of these prefixes are present so
			 * X86_64 will never get here anyway
			 */
			scan_more = ((instr_lo & 7) == 0x6);
			break;
#ifdef CONFIG_X86_64
		case 0x40:
			/*
			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
			 * Need to figure out under what instruction mode the
			 * instruction was issued. Could check the LDT for lm,
			 * but for now it's good enough to assume that long
			 * mode only uses well known segments or kernel.
			 */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;
#endif
		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;

			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}
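
/*
 * For reference, the scan above accepts the two-byte prefetch opcodes,
 * optionally preceded by segment/operand/REX/lock/rep prefixes:
 * "prefetchnta (%eax)" encodes as 0f 18 00 (the SSE 0x0F18 group) and
 * the 3DNow! "prefetch (%eax)" as 0f 0d 00 (the 0x0F0D group).
 */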

static void force_sig_info_fault(int si_signo, int si_code,
				 unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

#ifdef CONFIG_X86_64
static int bad_address(void *p)
{
	unsigned long dummy;
	return probe_kernel_address((unsigned long *)p, dummy);
}
#endif

static void dump_pagetable(unsigned long address)
{
#ifdef CONFIG_X86_32
	__typeof__(pte_val(__pte(0))) page;

	page = read_cr3();
	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", page);
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
							 & (PTRS_PER_PMD - 1)];
		printk(KERN_CONT "*pde = %016Lx ", page);
		page &= ~_PAGE_NX;
	}
#else
	printk("*pde = %08lx ", page);
#endif

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already.
	 */
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && (page & _PAGE_PRESENT)
	    && !(page & _PAGE_PSE)) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
							 & (PTRS_PER_PTE - 1)];
		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
	}

	printk("\n");
#else /* CONFIG_X86_64 */
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = (pgd_t *)read_cr3();

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
	pgd += pgd_index(address);
	if (bad_address(pgd)) goto bad;
	printk("PGD %lx ", pgd_val(*pgd));
	if (!pgd_present(*pgd)) goto ret;

	pud = pud_offset(pgd, address);
	if (bad_address(pud)) goto bad;
	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto ret;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd)) goto bad;
	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte)) goto bad;
	printk("PTE %lx", pte_val(*pte));
ret:
	printk("\n");
	return;
bad:
	printk("BAD\n");
#endif
}

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	return pmd_k;
}
#endif
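
/*
 * vmalloc_sync_one() is the 32-bit helper shared by vmalloc_fault() and
 * vmalloc_sync_all() below: it copies a single kernel pmd entry from the
 * init_mm reference page table into the given page directory, returning
 * NULL when init_mm has no mapping there either.
 */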

#ifdef CONFIG_X86_64
static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64-bit RIP register on C stepping K8.
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing for X86_32.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	static int warned;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		if (!warned) {
			printk(errata93_warning);
			warned = 1;
		}
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: the K8 in compat mode occasionally jumps
 * to illegal addresses >4GB. We catch this in the page fault handler
 * because these addresses are not reachable. Just detect this case and
 * return. Any code segment in the LDT is compatibility mode (bit 2 of
 * the CS selector is the TI bit, which is set for LDT selectors).
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
	    (address >> 32))
		return 1;
#endif
	return 0;
}

void do_invalid_op(struct pt_regs *, unsigned long);

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround. The IDT is mapped
	 * read-only on affected CPUs, so the lockup becomes a page
	 * fault on the IDT instead; a fault landing on descriptor 6
	 * (invalid opcode) means the CPU hit the erratum, and we
	 * deliver the #UD by hand.
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
			    unsigned long address)
{
#ifdef CONFIG_X86_32
	if (!oops_may_print())
		return;
#endif

#ifdef CONFIG_X86_PAE
	if (error_code & PF_INSTR) {
		unsigned int level;
		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(KERN_CRIT "kernel tried to execute "
				"NX-protected page - exploit attempt? "
				"(uid: %d)\n", current->uid);
	}
#endif

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");
#ifdef CONFIG_X86_32
	printk(KERN_CONT " at %08lx\n", address);
#else
	printk(KERN_CONT " at %016lx\n", address);
#endif
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

#ifdef CONFIG_X86_64
static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
				 unsigned long error_code)
{
	unsigned long flags = oops_begin();
	struct task_struct *tsk;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       current->comm, address);
	dump_pagetable(address);

	tsk = current;
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	if (__die("Bad pagetable", regs, error_code))
		regs = NULL;
	oops_end(flags, regs, SIGKILL);
}
#endif

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;
	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry. This allows
 * us to lazily refresh the TLB when increasing the permissions of a
 * kernel page (RO -> RW or NX -> X). Doing it eagerly is very
 * expensive since that implies doing a full cross-processor TLB
 * flush, even if no stale TLB entries exist on other processors.
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static int spurious_fault(unsigned long address,
			  unsigned long error_code)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	return spurious_fault_check(error_code, pte);
}
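
/*
 * Example of a spurious fault: CPU 0 changes a kernel page from RO to
 * RW; CPU 1 still holds the old RO entry in its TLB and takes a write
 * fault on that page. The walk above finds a PTE that already permits
 * the write, so the fault can simply be dropped and the access retried.
 */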

/*
 * X86_32
 * Handle a fault on the vmalloc or module mapping area
 *
 * X86_64
 * Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static int vmalloc_fault(unsigned long address)
{
#ifdef CONFIG_X86_32
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
#else
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush.
	 */
	pgd = pgd_offset(current->mm ?: &init_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;
	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */
	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;
	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();
	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;
	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();
	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;
	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
#endif
}
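
/*
 * Typical case handled here: a driver vmalloc()s a buffer, which installs
 * entries only in init_mm's page tables. A task whose pgd was allocated
 * before that then touches the buffer (possibly from interrupt context)
 * and faults; the missing kernel entry is copied in from the reference
 * page table above, without ever taking mmap_sem.
 */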

int show_unhandled_signals = 1;

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;
	int fault;
#ifdef CONFIG_X86_64
	unsigned long flags;
#endif

	/*
	 * We can fault from pretty much anywhere, with unknown IRQ state.
	 */
	trace_hardirqs_fixup();

	tsk = current;
	mm = tsk->mm;
	prefetchw(&mm->mmap_sem);

	/* get the address */
	address = read_cr2();

	si_code = SEGV_MAPERR;

	if (notify_page_fault(regs))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * ((error_code & PF_USER) == 0), and that the fault was not a
	 * protection error ((error_code & (PF_PROT|PF_RSVD)) == 0).
	 */
#ifdef CONFIG_X86_32
	if (unlikely(address >= TASK_SIZE)) {
#else
	if (unlikely(address >= TASK_SIZE64)) {
#endif
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;

		/* Can handle a stale RO->RW TLB */
		if (spurious_fault(address, error_code))
			return;

		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

#ifdef CONFIG_X86_32
	/*
	 * It's safe to allow irq's after cr2 has been saved and the vmalloc
	 * fault has been handled.
	 */
	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;
#else /* CONFIG_X86_64 */
	if (likely(regs->flags & X86_EFLAGS_IF))
		local_irq_enable();

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(address, regs, error_code);

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (unlikely(in_atomic() || !mm))
		goto bad_area_nosemaphore;

	/*
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet.
	 */
	if (user_mode_vm(regs))
		error_code |= PF_USER;
again:
#endif
	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (PF_PROT|PF_WRITE)) {
	default:	/* 3: write, present */
		/* fall through */
	case PF_WRITE:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case PF_PROT:		/* read, present */
		goto bad_area;
	case 0:			/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

#ifdef CONFIG_X86_32
survive:
#endif
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#ifdef CONFIG_X86_32
	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (v8086_mode(regs)) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
#endif
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (is_errata100(regs, address))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk(
#ifdef CONFIG_X86_32
			"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
#else
			"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
#endif
			task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			tsk->comm, task_pid_nr(tsk), address, regs->ip,
			regs->sp, error_code);
			print_vma_addr(" in ", regs->ip);
			printk("\n");
		}

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

	if (is_f00f_bug(regs, address))
		return;

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * X86_32
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 *
	 * X86_64
	 * Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
#ifdef CONFIG_X86_32
	bust_spinlocks(1);
#else
	flags = oops_begin();
#endif

	show_fault_oops(regs, error_code, address);

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;

#ifdef CONFIG_X86_32
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
#else
	if (__die("Oops", regs, error_code))
		regs = NULL;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, SIGKILL);
#endif

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
#ifdef CONFIG_X86_32
		down_read(&mm->mmap_sem);
		goto survive;
#else
		goto again;
#endif
	}

	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & PF_USER)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & PF_USER))
		goto no_context;
#ifdef CONFIG_X86_32
	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;
#endif
	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

void vmalloc_sync_all(void)
{
#ifdef CONFIG_X86_32
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each_entry(page, &pgd_list, lru) {
				if (!vmalloc_sync_one(page_address(page),
						      address))
					break;
			}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#else /* CONFIG_X86_64 */
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = VMALLOC_START & PGDIR_MASK;
	unsigned long address;

	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			const pgd_t *pgd_ref = pgd_offset_k(address);
			unsigned long flags;
			struct page *page;

			if (pgd_none(*pgd_ref))
				continue;
			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each_entry(page, &pgd_list, lru) {
				pgd_t *pgd;
				pgd = (pgd_t *)page_address(page) + pgd_index(address);
				if (pgd_none(*pgd))
					set_pgd(pgd, *pgd_ref);
				else
					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
			}
			spin_unlock_irqrestore(&pgd_lock, flags);
			set_bit(pgd_index(address), insync);
		}
		if (address == start)
			start = address + PGDIR_SIZE;
	}

	/* Check that there is no need to do the same for the modules area. */
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
		       (__START_KERNEL & PGDIR_MASK)));
#endif
}