fault_32.c

/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2008 Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/kgdb.h>
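
/*
 * Kprobes hook: if a kprobe is active and the fault happened in kernel
 * mode, give the kprobe fault handler a chance to fix things up before
 * the normal fault path runs. Returns 1 if the fault was consumed.
 */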
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        int ret = 0;

#ifdef CONFIG_KPROBES
        if (!user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, trap))
                        ret = 1;
                preempt_enable();
        }
#endif

        return ret;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                                        unsigned long writeaccess,
                                        unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        int si_code;
        int fault;
        siginfo_t info;

        /*
         * We don't bother with any notifier callbacks here, as they are
         * all handled through the __do_page_fault() fast-path.
         */

        tsk = current;
        si_code = SEGV_MAPERR;
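
        /*
         * Kernel-space faults (address >= TASK_SIZE) are fixed up without
         * taking mmap_sem: vmalloc and module mappings are only ever
         * instantiated in the reference page table (swapper_pg_dir), so it
         * is enough to lazily copy the missing upper-level entries into
         * the active page table and retry the access.
         */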
        if (unlikely(address >= TASK_SIZE)) {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;

                pgd = get_TTB() + offset;
                pgd_k = swapper_pg_dir + offset;

                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        set_pgd(pgd, *pgd_k);
                        return;
                }

                pud = pud_offset(pgd, address);
                pud_k = pud_offset(pgd_k, address);
                if (!pud_present(*pud)) {
                        if (!pud_present(*pud_k))
                                goto bad_area_nosemaphore;
                        set_pud(pud, *pud_k);
                        return;
                }

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;
                set_pmd(pmd, *pmd_k);
                return;
        }

        /* Only enable interrupts if they were on before the fault */
        if ((regs->sr & SR_IMASK) != SR_IMASK) {
                trace_hardirqs_on();
                local_irq_enable();
        }

        mm = tsk->mm;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);
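
        /*
         * Find the VMA covering the faulting address. If the address lies
         * just below a VM_GROWSDOWN mapping (typically the stack), try to
         * expand that mapping downwards before treating it as a bad access.
         */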
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        si_code = SEGV_ACCERR;
        if (writeaccess) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
survive:
        fault = handle_mm_fault(mm, vma, address, writeaccess);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
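
        /*
         * The fault was handled. Account it as a major fault if the handler
         * had to block (e.g. to read the page in from disk), minor otherwise.
         */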
        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;

        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        if (user_mode(regs)) {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = si_code;
                info.si_addr = (void *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;
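
        /*
         * Trapped I/O regions (see asm/io_trapped.h) get a chance to
         * emulate the access before we treat this as an unrecoverable
         * kernel fault.
         */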
        if (handle_trapped_io(regs, address))
                return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        bust_spinlocks(1);

        if (oops_may_print()) {
                unsigned long page;

                if (address < PAGE_SIZE)
                        printk(KERN_ALERT "Unable to handle kernel NULL "
                               "pointer dereference");
                else
                        printk(KERN_ALERT "Unable to handle kernel paging "
                               "request");
                printk(" at virtual address %08lx\n", address);
                printk(KERN_ALERT "pc = %08lx\n", regs->pc);
                page = (unsigned long)get_TTB();
                if (page) {
                        page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
                        printk(KERN_ALERT "*pde = %08lx\n", page);
                        if (page & _PAGE_PRESENT) {
                                page &= PAGE_MASK;
                                address &= 0x003ff000;
                                page = ((__typeof__(page) *)
                                        __va(page))[address >> PAGE_SHIFT];
                                printk(KERN_ALERT "*pte = %08lx\n", page);
                        }
                }
        }

        die("Oops", regs, writeaccess);
        bust_spinlocks(0);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
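        /*
         * Never OOM-kill init from here: let it yield and retry the fault
         * in the hope that memory has been freed up in the meantime.
         */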
        if (is_global_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_group_exit(SIGKILL);
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)address;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX     (P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX     P4SEG
#endif
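
/*
 * This is the fast path referenced by the comment in do_page_fault(): it
 * runs with interrupts disabled (see below) and only reloads the TLB from
 * a PTE that is already valid. A non-zero return tells the caller that the
 * fault could not be resolved here and the full do_page_fault() slow path
 * must run.
 */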

/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
                                         unsigned long writeaccess,
                                         unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;

        if (notify_page_fault(regs, lookup_exception_vector()))
                return 0;

#ifdef CONFIG_SH_KGDB
        if (kgdb_nofault && kgdb_bus_err_hook)
                kgdb_bus_err_hook();
#endif

        /*
         * We don't take page faults for P1, P2, and parts of P4, these
         * are always mapped, whether it be due to legacy behaviour in
         * 29-bit mode, or due to PMB configuration in 32-bit mode.
         */
        if (address >= P3SEG && address < P3_ADDR_MAX) {
                pgd = pgd_offset_k(address);
        } else {
                if (unlikely(address >= TASK_SIZE || !current->mm))
                        return 1;

                pgd = pgd_offset(current->mm, address);
        }

        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                return 1;
        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                return 1;
        pte = pte_offset_kernel(pmd, address);
        entry = *pte;
        if (unlikely(pte_none(entry) || pte_not_present(entry)))
                return 1;
        if (unlikely(writeaccess && !pte_write(entry)))
                return 1;
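
        /*
         * The dirty and accessed bits are maintained in software: mark the
         * PTE dirty on a write fault and young on any fault, then write it
         * back so the new TLB entry carries the updated protection bits.
         */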
        if (writeaccess)
                entry = pte_mkdirty(entry);
        entry = pte_mkyoung(entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
        /*
         * ITLB is not affected by "ldtlb" instruction.
         * So, we need to flush the entry by ourselves.
         */
        local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

        set_pte(pte, entry);
        update_mmu_cache(NULL, address, entry);

        return 0;
}