fault.c

/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <asm/pgalloc.h>
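
/*
 * Kernel mappings in the vmalloc region exist only in the "master" page
 * table (init_mm.pgd). The first time a task touches such an address it
 * faults, and the relevant kernel entry is simply copied into the task's
 * own page table here, without taking any locks.
 */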
static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
{
        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         */
        pgd_t *pgd, *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd = pgd_offset_fast(mm, address);
        pgd_k = pgd_offset_k(address);
        if (!pgd_present(*pgd_k))
                goto bad_area;

        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                goto bad_area;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                goto bad_area;

        set_pmd(pmd, *pmd_k);

        /* XXX: create the TLB entry here */
        return 0;

bad_area:
        return 1;
}
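
/*
 * Top level fault handler, invoked from the low-level TLB miss and
 * protection violation (ProtV) exception handlers. 'address' is the
 * faulting virtual address and 'cause_code' carries the exception cause
 * register value, which lets us distinguish e.g. instruction-fetch
 * protection violations below.
 */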
void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
                   unsigned long cause_code)
{
        struct vm_area_struct *vma = NULL;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        siginfo_t info;
        int fault, ret;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                                (write ? FAULT_FLAG_WRITE : 0);

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (address >= VMALLOC_START && address <= VMALLOC_END) {
                ret = handle_vmalloc_fault(mm, address);
                if (unlikely(ret))
                        goto bad_area_nosemaphore;
                else
                        return;
        }

        info.si_code = SEGV_MAPERR;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;
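
        /*
         * The fault proper is handled with mm->mmap_sem held for read:
         * it protects the VMA lookup and possible stack expansion below,
         * and is dropped again on every exit path.
         */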
retry:
        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        info.si_code = SEGV_ACCERR;

        /* Handle protection violation, execute on heap or stack */
        if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH))
                goto bad_area;
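
        /*
         * The access must match the permissions of the vma: a write needs
         * VM_WRITE, anything else (read or execute) needs VM_READ or
         * VM_EXEC, otherwise the fault is flagged as an access error.
         */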
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

survive:
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);

        /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
        if (unlikely(fatal_signal_pending(current))) {
                if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
                        up_read(&mm->mmap_sem);
                if (user_mode(regs))
                        return;
        }
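
        /*
         * If handle_mm_fault() returned VM_FAULT_RETRY it has already
         * released mmap_sem; the 'goto retry' below re-acquires it. The
         * retry is attempted only once: FAULT_FLAG_ALLOW_RETRY is cleared
         * and FAULT_FLAG_TRIED set for the second pass.
         */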
        if (likely(!(fault & VM_FAULT_ERROR))) {
                if (flags & FAULT_FLAG_ALLOW_RETRY) {
                        /* To avoid updating stats twice for retry case */
                        if (fault & VM_FAULT_MAJOR)
                                tsk->maj_flt++;
                        else
                                tsk->min_flt++;

                        if (fault & VM_FAULT_RETRY) {
                                flags &= ~FAULT_FLAG_ALLOW_RETRY;
                                flags |= FAULT_FLAG_TRIED;
                                goto retry;
                        }
                }

                /* Fault Handled Gracefully */
                up_read(&mm->mmap_sem);
                return;
        }

        /* TBD: switch to pagefault_out_of_memory() */
        if (fault & VM_FAULT_OOM)
                goto out_of_memory;
        else if (fault & VM_FAULT_SIGBUS)
                goto do_sigbus;

        /* no man's land */
        BUG();
        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                tsk->thread.fault_address = address;
                tsk->thread.cause_code = cause_code;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void __user *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }
no_context:
        /* Are we prepared to handle this kernel fault?
         *
         * (The kernel has valid exception-points in the source
         * when it accesses user-memory. When it fails in one
         * of those points, we find it in a table and do a jump
         * to some fixup code that loads an appropriate error
         * code)
         */
        if (fixup_exception(regs))
                return;

        die("Oops", regs, address, cause_code);
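
        /*
         * Out of memory while servicing the fault: init is special-cased
         * (it is never killed here, we just yield and retry the fault);
         * any other user-mode task is killed, and a kernel-mode fault
         * falls back to the fixup/die path above.
         */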
out_of_memory:
        if (is_global_init(tsk)) {
                yield();
                goto survive;
        }

        up_read(&mm->mmap_sem);

        if (user_mode(regs))
                do_group_exit(SIGKILL); /* This will never return */

        goto no_context;
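
        /*
         * VM_FAULT_SIGBUS: the vma was valid but the page could not be
         * brought in (e.g. an access beyond the end of a file-backed
         * mapping). User mode gets a SIGBUS with the faulting address;
         * kernel mode again goes through the fixup/die path.
         */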
do_sigbus:
        up_read(&mm->mmap_sem);

        if (!user_mode(regs))
                goto no_context;

        tsk->thread.fault_address = address;
        tsk->thread.cause_code = cause_code;
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGBUS, &info, tsk);
}