tlbflush_64.c

/*
 * arch/sh/mm/tlb-flush_64.c
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
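
/*
 * Walk the page tables for @address in @mm and return a pointer to the
 * PTE that maps it, or NULL if any level of the walk is missing or the
 * PTE is not present.
 */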
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
        pgd_t *dir;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;

        dir = pgd_offset(mm, address);
        if (pgd_none(*dir))
                return NULL;

        pud = pud_offset(dir, address);
        if (pud_none(*pud))
                return NULL;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        pte = pte_offset_kernel(pmd, address);
        entry = *pte;
        if (pte_none(entry) || !pte_present(entry))
                return NULL;

        return pte;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
                              unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        const struct exception_table_entry *fixup;
        int write = error_code & FAULT_CODE_WRITE;
        int textaccess = error_code & FAULT_CODE_ITLB;
        unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                              (write ? FAULT_FLAG_WRITE : 0));
        pte_t *pte;
        int fault;

        /* SIM
         * Note this is now called with interrupts still disabled
         * This is to cope with being called for a missing IO port
         * address with interrupts disabled. This should be fixed as
         * soon as we have a better 'fast path' miss handler.
         *
         * Plus take care how you try and debug this stuff.
         * For example, writing debug data to a port which you
         * have just faulted on is not going to work.
         */

        tsk = current;
        mm = tsk->mm;

        /* Not an IO address, so reenable interrupts */
        local_irq_enable();

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

retry:
        /* TLB misses upon some cache flushes get done under cli() */
        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
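        /*
         * Check that the access is permitted by the vma: an ITLB miss
         * (instruction fetch) needs VM_EXEC, while a data access needs
         * VM_WRITE for stores and VM_READ for loads.
         */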
        if (textaccess) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else {
                if (write) {
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
                } else {
                        if (!(vma->vm_flags & VM_READ))
                                goto bad_area;
                }
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
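
        /*
         * Fault accounting: charge a major or minor fault to the task and
         * the perf counters. If the core VM asked us to retry, do so once,
         * with FAULT_FLAG_ALLOW_RETRY cleared so we cannot loop forever.
         */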
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }

                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
                        goto retry;
                }
        }

        /* If we get here, the page fault has been handled. Do the TLB refill
           now from the newly-setup PTE, to avoid having to fault again right
           away on the same instruction. */
        pte = lookup_pte(mm, address);
        if (!pte) {
                /* From empirical evidence, we can get here, due to
                   !pte_present(pte). (e.g. if a swap-in occurs, and the page
                   is swapped back out again before the process that wanted it
                   gets rescheduled?) */
                goto no_pte;
        }
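
        /*
         * Preload the new translation into the ITLB (for an instruction
         * access) or the DTLB (for a data access) so the faulting
         * instruction does not miss again immediately.
         */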
        __do_tlb_refill(address, textaccess, pte);

no_pte:
        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        up_read(&mm->mmap_sem);

        if (user_mode(regs)) {
                static int count = 0;
                siginfo_t info;

                if (count < 4) {
                        /* This is really to help debug faults when starting
                         * usermode, so only need a few */
                        count++;
                        printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
                               address, task_pid_nr(current), current->comm,
                               (unsigned long) regs->pc);
                }
                if (is_global_init(tsk)) {
                        panic("INIT had user mode bad_area\n");
                }

                tsk->thread.address = address;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_addr = (void *) address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        fixup = search_exception_tables(regs->pc);
        if (fixup) {
                regs->pc = fixup->fixup;
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);

        die("Oops", regs, error_code);
        do_exit(SIGKILL);

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;

do_sigbus:
        printk("fault:Do sigbus\n");
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        tsk->thread.address = address;
        force_sig(SIGBUS, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
}
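
/*
 * Flush the single ITLB and/or DTLB entry (if any) that maps @page for
 * @asid.  The expected PTEH value (sign-extended EPN | ASID | valid) is
 * compared against each slot, read back with getcfg, and the matching
 * slot is invalidated.
 */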
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
        unsigned long long match, pteh = 0, lpage;
        unsigned long tlb;

        /*
         * Sign-extend based on neff.
         */
        lpage = neff_sign_extend(page);
        match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
        match |= lpage;

        for_each_itlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                if (pteh == match) {
                        __flush_tlb_slot(tlb);
                        break;
                }
        }

        for_each_dtlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                if (pteh == match) {
                        __flush_tlb_slot(tlb);
                        break;
                }
        }
}
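
/*
 * Flush the TLB entry for a single user page in @vma, using the current
 * ASID, with interrupts disabled around the lookup.
 */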
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        unsigned long flags;

        if (vma->vm_mm) {
                page &= PAGE_MASK;
                local_irq_save(flags);
                local_flush_tlb_one(get_asid(), page);
                local_irq_restore(flags);
        }
}
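
/*
 * Flush every ITLB/DTLB entry whose ASID matches this mm and whose
 * effective page number falls within [start, end].
 */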
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        unsigned long flags;
        unsigned long long match, pteh = 0, pteh_epn, pteh_low;
        unsigned long tlb;
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm;

        mm = vma->vm_mm;
        if (cpu_context(cpu, mm) == NO_CONTEXT)
                return;

        local_irq_save(flags);

        start &= PAGE_MASK;
        end &= PAGE_MASK;

        match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;

        /* Flush ITLB */
        for_each_itlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                pteh_epn = pteh & PAGE_MASK;
                pteh_low = pteh & ~PAGE_MASK;

                if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
                        __flush_tlb_slot(tlb);
        }

        /* Flush DTLB */
        for_each_dtlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                pteh_epn = pteh & PAGE_MASK;
                pteh_low = pteh & ~PAGE_MASK;

                if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
                        __flush_tlb_slot(tlb);
        }

        local_irq_restore(flags);
}
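
/*
 * Flush an entire address space.  Rather than walking the TLB, simply
 * invalidate the mm's context so that a fresh ASID is assigned; entries
 * tagged with the old ASID can then never match again.
 */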
void local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) == NO_CONTEXT)
                return;

        local_irq_save(flags);

        cpu_context(cpu, mm) = NO_CONTEXT;
        if (mm == current->mm)
                activate_context(mm, cpu);

        local_irq_restore(flags);
}

void local_flush_tlb_all(void)
{
        /* Invalidate all, including shared pages, excluding fixed TLBs */
        unsigned long flags, tlb;

        local_irq_save(flags);

        /* Flush each ITLB entry */
        for_each_itlb_entry(tlb)
                __flush_tlb_slot(tlb);

        /* Flush each DTLB entry */
        for_each_dtlb_entry(tlb)
                __flush_tlb_slot(tlb);

        local_irq_restore(flags);
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* FIXME: Optimize this later.. */
        flush_tlb_all();
}

void __flush_tlb_global(void)
{
        flush_tlb_all();
}
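
/*
 * Nothing to do here: the TLB is refilled directly from do_page_fault()
 * above once the PTE has been set up.
 */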
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}