fault.c

/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/kdebug.h>

extern void die (char *, struct pt_regs *, long);

/*
 * This routine is analogous to expand_stack() but instead grows the
 * register backing store (which grows towards higher addresses).
 * Since the register backing store is accessed sequentially, we
 * disallow growing the RBS by more than a page at a time.  Note that
 * the VM_GROWSUP flag can be set on any VM area but that's fine
 * because the total process size is still limited by RLIMIT_STACK and
 * RLIMIT_AS.
 */
static inline long
expand_backing_store (struct vm_area_struct *vma, unsigned long address)
{
	unsigned long grow;

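	/* Grow by exactly one page; "grow" is counted in pages, matching
	   the units of total_vm and locked_vm below. */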
	grow = PAGE_SIZE >> PAGE_SHIFT;
	if (address - vma->vm_start > current->signal->rlim[RLIMIT_STACK].rlim_cur
	    || (((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->signal->rlim[RLIMIT_AS].rlim_cur))
		return -ENOMEM;
	vma->vm_end += PAGE_SIZE;
	vma->vm_mm->total_vm += grow;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow;
	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
	return 0;
}

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

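	/* Walk the kernel page table by hand, one level at a time
	   (pgd -> pud -> pmd -> pte), bailing out as soon as a level
	   is absent or bad. */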
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

void
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_sem (pfn_valid macro is called during mmap).  There
	 * is no vma for region 5 addresses anyway, so skip getting the semaphore
	 * and go directly to the exception handling code.
	 */
	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle the kprobes on user space access instructions.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
		       SIGSEGV) == NOTIFY_STOP)
		return;

	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma)
		goto bad_area;

	/* find_vma_prev() returns vma such that address < vma->vm_end or NULL */
	if (address < vma->vm_start)
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif
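
	/* Translate the fault's ISR access bits (execute/write/read) into
	   the corresponding VM_* permission bits, so they can be compared
	   directly against vma->vm_flags below. */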
	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
		| (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

  survive:
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
	case VM_FAULT_MINOR:
		++current->min_flt;
		break;
	case VM_FAULT_MAJOR:
		++current->maj_flt;
		break;
	case VM_FAULT_SIGBUS:
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		signal = SIGBUS;
		goto bad_area;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return;

  check_expansion:
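	/*
	 * The address fell below the vma we found.  Two legitimate cases:
	 * the vma is a downward-growing stack (VM_GROWSDOWN), or the address
	 * sits exactly at the end of the previous vma, which is the
	 * upward-growing register backing store (VM_GROWSUP).
	 */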
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_backing_store(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s, and lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
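
	/* Genuine user-level fault: deliver the signal (SIGSEGV or SIGBUS),
	   attaching the ia64-specific ISR value to the siginfo. */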
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s, and lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non present translation that becomes
	 * stale.  If that happens, the non present fault handler already purged the stale
	 * translation, which fixed the problem.  So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	die("Oops", regs, isr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
	return;

  out_of_memory:
	up_read(&mm->mmap_sem);
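	/* Never kill init (pid 1) on OOM: yield the CPU, reacquire the
	   semaphore, and retry the fault instead. */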
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
}