/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>

extern int die(char *, struct pt_regs *, long);
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
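
	/*
	 * Walk the page table top-down (pgd -> pud -> pmd -> pte); a
	 * missing or bad entry at any level means there is no valid
	 * translation, so the page cannot be present.
	 */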
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;
	int fault;

	/* mmap_sem is performance critical.... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * hold the mmap_sem (the pfn_valid macro is called during mmap). There
	 * is no vma for region 5 addresses anyway, so skip taking the semaphore
	 * and go directly to the exception handling code.
	 */
	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle kprobes on user-space access instructions.
	 */
	if (notify_page_fault(regs, TRAP_BRKPT))
		return;

	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns a vma such that address < vma->vm_end, or NULL.
	 *
	 * It may find no vma covering the address: the last vm area may be the
	 * register backing store that needs to expand upwards, in which case
	 * vma will be NULL but prev_vma will be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;
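
	/*
	 * Build a vm_flags mask for the access types the faulting
	 * instruction performed: the ISR write and execute bits map onto
	 * the VM_WRITE and VM_EXEC bit positions, and the vma must grant
	 * every access type the instruction needed.
	 */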
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}
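
	/*
	 * The fault was handled; account it against the task.  A major
	 * fault is one that required I/O (e.g. reading the page in from
	 * disk), a minor fault is one that did not.
	 */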
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	up_read(&mm->mmap_sem);
	return;

  check_expansion:
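	/*
	 * The address is not covered by any vma.  Two kinds of expansion
	 * may still make it valid: a downward-growing stack whose vma
	 * starts just above the address, or the upward-growing register
	 * backing store (prev_vma) ending exactly at the address.
	 */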
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault: set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}
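
	/*
	 * Kernel-mode fault with no user context to signal: try the
	 * speculative-load escape hatch and the exception-fixup path
	 * below, and oops if neither applies.
	 */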
  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault: set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale. If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem. So we check whether the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

  out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
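
	/*
	 * For a user-mode fault, hand off to the generic VM out-of-memory
	 * path, which may invoke the OOM killer to free up memory.
	 */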
	pagefault_out_of_memory();
}