fault_32.c

/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003 - 2008  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 * Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_counter.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
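
/*
 * Give kprobes first crack at the fault: if a probe's own fault
 * handler claims it, the normal page fault path is skipped. Only
 * kernel-mode faults are of interest here, and preemption is
 * disabled around the check so the per-CPU kprobe state cannot
 * change underneath us.
 */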
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

#ifdef CONFIG_KPROBES
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}
#endif

	return ret;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	int fault;
	siginfo_t info;

	/*
	 * We don't bother with any notifier callbacks here, as they are
	 * all handled through the __do_page_fault() fast-path.
	 */

	tsk = current;
	si_code = SEGV_MAPERR;

	if (unlikely(address >= TASK_SIZE)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = get_TTB() + offset;
		pgd_k = swapper_pg_dir + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud)) {
			if (!pud_present(*pud_k))
				goto bad_area_nosemaphore;
			set_pud(pud, *pud_k);
			return;
		}
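
		/*
		 * Bottom level: a fault with the pmd already present
		 * was not a lazy-sync fault at all, and an absent
		 * reference pmd leaves nothing to copy from; either
		 * way the access is genuinely bad.
		 */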
		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);

		return;
	}

	mm = tsk->mm;

	if (unlikely(notify_page_fault(regs, lookup_exception_vector())))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;
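
	/*
	 * Find the VMA covering the faulting address; an address just
	 * below a VM_GROWSDOWN mapping is taken to be a stack access,
	 * and the stack is expanded to cover it.
	 */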
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
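
	/*
	 * The mapping exists, so any remaining failure is a protection
	 * error: a write fault needs VM_WRITE; any other access needs
	 * the vma to be readable, writable, or executable.
	 */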
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
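
	/*
	 * Fault accounting: handle_mm_fault() reports whether servicing
	 * the fault required I/O (major) or not (minor); bump the task
	 * counters and the matching perf software counters.
	 */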
	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
				     regs, address);
	} else {
		tsk->min_flt++;
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
				     regs, address);
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	bust_spinlocks(1);

	if (oops_may_print()) {
		unsigned long page;

		if (address < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
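
		/*
		 * Dump the PGD entry for the faulting address and, if
		 * it is present, the PTE as well, by hand-walking the
		 * page table rooted at the TTB.
		 */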
		page = (unsigned long)get_TTB();
		if (page) {
			page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
			printk(KERN_ALERT "*pde = %08lx\n", page);
			if (page & _PAGE_PRESENT) {
				page &= PAGE_MASK;
				address &= 0x003ff000;
				page = ((__typeof__(page) *)
						__va(page))[address >>
							    PAGE_SHIFT];
				printk(KERN_ALERT "*pte = %08lx\n", page);
			}
		}
	}

	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
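	/*
	 * init must not be killed from here: yield so that other tasks
	 * can make progress (and hopefully free some memory), then
	 * retry the fault.
	 */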
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

/*
 * Fast-path TLB miss handler, called with interrupts disabled.
 * Returns 0 if the TLB could be refilled from a PTE that is already
 * valid, non-zero to fall back to the full do_page_fault() slow path.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	int ret = 1;

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			goto out;

		pgd = pgd_offset(current->mm, address);
	}
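
	/*
	 * Walk down to the PTE. A missing or bad entry at any level
	 * means there is nothing to refill the TLB from, so punt to
	 * the do_page_fault() slow path.
	 */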
	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto out;
	if (unlikely(writeaccess && !pte_write(entry)))
		goto out;
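
	/*
	 * The PTE covers this access: mark it young, and dirty as well
	 * if this is a write, before loading it into the TLB.
	 */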
	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif
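
	/*
	 * Write the updated PTE back and let update_mmu_cache() load
	 * the new entry into the TLB.
	 */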
	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);

	ret = 0;
out:
	return ret;
}