// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/hardirq.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
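/* ASID allocation state shared with the MMU context code; user address
 * spaces are assigned ASIDs starting at ASID_USER_FIRST.
 */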
unsigned long asid_cache = ASID_USER_FIRST;

void bad_page_fault(struct pt_regs*, unsigned long, int);

#undef DEBUG_PAGE_FAULT
/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	siginfo_t info;

	int is_write, is_exec;
	int fault;

	info.si_code = SEGV_MAPERR;
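	/* Assume a bad mapping until we know better; this is switched to
	 * SEGV_ACCERR below if a VMA exists but denies the access.
	 */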
	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}
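	/* Classify the access from the exception cause: store faults are
	 * writes, instruction-TLB and fetch faults are executes, and
	 * everything else is treated as a read.
	 */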
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		   exccause == EXCCAUSE_ITLB_MISS ||
		   exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

#ifdef DEBUG_PAGE_FAULT
	printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
	       address, exccause, regs->pc, is_write ? "w" : "", is_exec ? "x" : "");
#endif
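	/* Find the VMA covering the faulting address; if the address lies
	 * just below a VM_GROWSDOWN mapping, try to grow the stack to
	 * cover it.
	 */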
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, is_write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
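	/* Fault handled: account it as major (required I/O to bring the
	 * page in) or minor (satisfied without I/O).
	 */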
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;

	up_read(&mm->mmap_sem);
	return;
	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;
	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
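	/* Never fail the fault for init: give up the CPU, retake mmap_sem
	 * and retry instead.
	 */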
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	bad_page_fault(regs, address, SIGKILL);
	return;
do_sigbus:
	up_read(&mm->mmap_sem);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *) address;
	force_sig_info(SIGBUS, &info, current);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;
vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

		pgd_val(*pgd) = pgd_val(*pgd_k);

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
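		/* The copied pmd entry points at the kernel's pte table, so
		 * the leaf entries are shared with init_mm; only check that
		 * the kernel mapping really exists.
		 */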
		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}

void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault? */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
#ifdef DEBUG_PAGE_FAULT
		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
		       current->comm, regs->pc, entry->fixup);
#endif
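		/* Record the faulting address and resume at the fixup handler
		 * recorded in the exception table (typically the error path
		 * of a user-access helper).
		 */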
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
	       "address %08lx\n pc = %08lx, ra = %08lx\n",
	       address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}