// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
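
/* Cache of the most recently assigned ASID; the mmu context code uses
 * it when handing out address-space IDs to user mms, starting from
 * ASID_USER_FIRST. */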
unsigned long asid_cache = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);

#undef DEBUG_PAGE_FAULT

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	siginfo_t info;

	int is_write, is_exec;
	int fault;

	info.si_code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}
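
	/* Classify the access from the hardware exception cause: a store
	 * cache-attribute fault is a write, and ITLB miss/privilege and
	 * fetch cache-attribute faults are instruction fetches; anything
	 * else is treated as a read. */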
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

#ifdef DEBUG_PAGE_FAULT
	printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
#endif
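
	/* Look up the vma covering the faulting address; find_vma() returns
	 * the first vma with vm_end above the address, so a hit still needs
	 * the vm_start check below. */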
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	info.si_code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
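
	/* The fault was handled; account it against the task as a major
	 * fault (required I/O) or a minor fault. */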
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	up_read(&mm->mmap_sem);
	return;

	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;

	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *) address;
	force_sig_info(SIGBUS, &info, current);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;
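
		/* Copy the matching top-level entries from the reference
		 * (init_mm) page table into the active mm's page table;
		 * the lower-level kernel page tables themselves are shared,
		 * so they only need to be walked and validated here. */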
		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

		pgd_val(*pgd) = pgd_val(*pgd_k);

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

		if (!pte_present(*pte_k))
			goto bad_page_fault;
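
		/* The kernel mapping is present; retrying the access lets
		 * the TLB refill from the now-synchronized page table. */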
		return;
	}

bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}

void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault? */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
#ifdef DEBUG_PAGE_FAULT
		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
		       current->comm, regs->pc, entry->fixup);
#endif
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
	       "address %08lx\n pc = %08lx, ra = %08lx\n",
	       address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
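
	/* die() should not return, but make sure the task exits if it does. */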
	do_exit(sig);
}