/*
 * arch/microblaze/mm/fault.c
 *
 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
 *
 * Derived from "arch/ppc/mm/fault.c"
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/mm/fault.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Modified by Cort Dougan and Paul Mackerras.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/mmu_context.h>
#include <linux/uaccess.h>
#include <asm/exceptions.h>

static unsigned long pte_misses;	/* updated by do_page_fault() */
static unsigned long pte_errors;	/* updated by do_page_fault() */

/*
 * Check whether the instruction at regs->pc is a store using
 * an update addressing form which will update r1.
 */
static int store_updates_sp(struct pt_regs *regs)
{
	unsigned int inst;

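	/* Fetch the faulting instruction from user memory; if regs->pc
	 * cannot be read, conservatively report "no stack update".
	 */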
	if (get_user(inst, (unsigned int __user *)regs->pc))
		return 0;
	/* check for 1 in the rD field */
	if (((inst >> 21) & 0x1f) != 1)
		return 0;
	/* check for store opcodes */
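	/* Bits 31, 30 and 28 set matches the store opcode group (pattern
	 * 0b11x1xx: sb/sh/sw and their immediate forms); the load opcodes
	 * have bit 28 clear.
	 */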
	if ((inst & 0xd0000000) == 0xd0000000)
		return 1;
	return 0;
}

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from do_page_fault below and from some of the procedures
 * in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *fixup;
/* MS: no context */
	/* Are we prepared to handle this fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}

	/* kernel has accessed a bad area */
	die("kernel access of bad area", regs, sig);
}

/*
 * The error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 */
void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	siginfo_t info;
	int code = SEGV_MAPERR;
	int is_write = error_code & ESR_S;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
				(is_write ? FAULT_FLAG_WRITE : 0);

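	/* Mirror the fault address and status into the saved register
	 * frame so the signal and debug paths can see them.
	 */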
	regs->ear = address;
	regs->esr = error_code;

	/* On a kernel fault above TASK_SIZE we can only check for a
	 * valid exception entry
	 */
	if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
		pr_warn("kernel task_size exceeded\n");
		_exception(SIGSEGV, regs, code, address);
	}

	/* for instr TLB miss and instr storage exception ESR_S is undefined */
	if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
		is_write = 0;

	if (unlikely(in_atomic() || !mm)) {
		if (kernel_mode(regs))
			goto bad_area_nosemaphore;

		/* in_atomic() in user mode is really bad,
		   as is current->mm == NULL. */
		pr_emerg("Page fault in user mode with in_atomic(), mm = %p\n",
			 mm);
		pr_emerg("r15 = %lx MSR = %lx\n",
			 regs->r15, regs->msr);
		die("Weird page fault", regs, SIGSEGV);
	}

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
			goto bad_area_nosemaphore;

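		/* Retried faults re-enter here: the core mm code dropped
		 * mmap_sem before asking us to retry, so take it again.
		 */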
retry:
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;

	if (unlikely(!is_write))
		goto bad_area;

	/*
	 * N.B. The ABI allows programs to access up to
	 * a few hundred bytes below the stack pointer (TBD).
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1. Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (unlikely(address + 0x100000 < vma->vm_end)) {

		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			goto bad_area;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 < uregs->r1
		    && (kernel_mode(regs) || !store_updates_sp(regs)))
			goto bad_area;
	}
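	/* The address is close enough to the stack pointer: grow the
	 * stack VMA down to cover it.
	 */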
	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	code = SEGV_ACCERR;

	/* a write */
	if (unlikely(is_write)) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			goto bad_area;
	/* a read */
	} else {
		/* protection fault */
		if (unlikely(error_code & 0x08000000))
			goto bad_area;
		if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
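
	/* A retry result with a fatal signal pending means the core mm
	 * code already dropped mmap_sem; leave without releasing it again.
	 */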
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
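		/* Account the fault against the task's major/minor counters */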
		if (unlikely(fault & VM_FAULT_MAJOR))
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
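			/* Allow exactly one retry: clear ALLOW_RETRY and
			 * mark this attempt as TRIED for the next pass.
			 */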
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * keep track of tlb+htab misses that are good addrs but
	 * just need pte's created via handle_mm_fault()
	 * -- Cort
	 */
	pte_misses++;
	return;

bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	pte_errors++;

	/* User mode accesses cause a SIGSEGV */
	if (user_mode(regs)) {
		_exception(SIGSEGV, regs, code, address);
/*		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = code;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, current);*/
		return;
	}

	bad_page_fault(regs, address, SIGSEGV);
	return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}