/*
 * arch/sh/mm/tlb-flush_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

extern void die(const char *,struct pt_regs *,long);
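
/*
 * Debug helpers: dump the protection bits, VMA bounds and owning task
 * for a faulting access.  Only referenced from the DEBUG_FAULT paths
 * in do_page_fault() below.
 */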
#define PFLAG(val,flag) (( (val) & (flag) ) ? #flag : "" )
#define PPROT(flag) PFLAG(pgprot_val(prot),flag)

static inline void print_prots(pgprot_t prot)
{
        printk("prot is 0x%016llx\n",pgprot_val(prot));

        printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
               PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
}

static inline void print_vma(struct vm_area_struct *vma)
{
        printk("vma start 0x%08lx\n", vma->vm_start);
        printk("vma end 0x%08lx\n", vma->vm_end);

        print_prots(vma->vm_page_prot);
        printk("vm_flags 0x%08lx\n", vma->vm_flags);
}

static inline void print_task(struct task_struct *tsk)
{
        printk("Task pid %d\n", task_pid_nr(tsk));
}
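
/*
 * Walk the page tables of @mm and return a pointer to the PTE mapping
 * @address, or NULL if no present mapping exists at any level.
 */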
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
        pgd_t *dir;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;

        dir = pgd_offset(mm, address);
        if (pgd_none(*dir))
                return NULL;

        pud = pud_offset(dir, address);
        if (pud_none(*pud))
                return NULL;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        pte = pte_offset_kernel(pmd, address);
        entry = *pte;
        if (pte_none(entry) || !pte_present(entry))
                return NULL;

        return pte;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
                              unsigned long textaccess, unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        const struct exception_table_entry *fixup;
        pte_t *pte;
        int fault;

        /* SIM
         * Note this is now called with interrupts still disabled
         * This is to cope with being called for a missing IO port
         * address with interrupts disabled. This should be fixed as
         * soon as we have a better 'fast path' miss handler.
         *
         * Plus take care how you try and debug this stuff.
         * For example, writing debug data to a port which you
         * have just faulted on is not going to work.
         */
        tsk = current;
        mm = tsk->mm;

        /* Not an IO address, so reenable interrupts */
        local_irq_enable();

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        /* TLB misses upon some cache flushes get done under cli() */
        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma) {
#ifdef DEBUG_FAULT
                print_task(tsk);
                printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
                       __func__, __LINE__,
                       address,regs->pc,textaccess,writeaccess);
                show_regs(regs);
#endif
                goto bad_area;
        }

        if (vma->vm_start <= address) {
                goto good_area;
        }

        if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
                print_task(tsk);
                printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
                       __func__, __LINE__,
                       address,regs->pc,textaccess,writeaccess);
                show_regs(regs);
                print_vma(vma);
#endif
                goto bad_area;
        }

        if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
                print_task(tsk);
                printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
                       __func__, __LINE__,
                       address,regs->pc,textaccess,writeaccess);
                show_regs(regs);
#endif
                goto bad_area;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        if (textaccess) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else {
                if (writeaccess) {
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
                } else {
                        if (!(vma->vm_flags & VM_READ))
                                goto bad_area;
                }
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (fault & VM_FAULT_MAJOR) {
                tsk->maj_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                              regs, address);
        } else {
                tsk->min_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                              regs, address);
        }

        /* If we get here, the page fault has been handled.  Do the TLB refill
           now from the newly-setup PTE, to avoid having to fault again right
           away on the same instruction. */
        pte = lookup_pte (mm, address);
        if (!pte) {
                /* From empirical evidence, we can get here, due to
                   !pte_present(pte). (e.g. if a swap-in occurs, and the page
                   is swapped back out again before the process that wanted it
                   gets rescheduled?) */
                goto no_pte;
        }

        __do_tlb_refill(address, textaccess, pte);

no_pte:
        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
#ifdef DEBUG_FAULT
        printk("fault:bad area\n");
#endif
        up_read(&mm->mmap_sem);

        if (user_mode(regs)) {
                static int count=0;
                siginfo_t info;
                if (count < 4) {
                        /* This is really to help debug faults when starting
                         * usermode, so only need a few */
                        count++;
                        printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
                               address, task_pid_nr(current), current->comm,
                               (unsigned long) regs->pc);
#if 0
                        show_regs(regs);
#endif
                }
                if (is_global_init(tsk)) {
                        panic("INIT had user mode bad_area\n");
                }
                tsk->thread.address = address;
                tsk->thread.error_code = writeaccess;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_addr = (void *) address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
#ifdef DEBUG_FAULT
        printk("fault:No context\n");
#endif
        /* Are we prepared to handle this kernel fault? */
        fixup = search_exception_tables(regs->pc);
        if (fixup) {
                regs->pc = fixup->fixup;
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);

        die("Oops", regs, writeaccess);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;

do_sigbus:
        printk("fault:Do sigbus\n");
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        tsk->thread.address = address;
        tsk->thread.error_code = writeaccess;
        tsk->thread.trap_no = 14;
        force_sig(SIGBUS, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
}
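
/*
 * Flush the ITLB and DTLB entry (if any) mapping @page for @asid on the
 * local CPU, by comparing each slot's PTEH against the expected
 * ASID/valid/EPN tag.
 */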
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
        unsigned long long match, pteh=0, lpage;
        unsigned long tlb;

        /*
         * Sign-extend based on neff.
         */
        lpage = neff_sign_extend(page);
        match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
        match |= lpage;

        for_each_itlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                if (pteh == match) {
                        __flush_tlb_slot(tlb);
                        break;
                }
        }

        for_each_dtlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                if (pteh == match) {
                        __flush_tlb_slot(tlb);
                        break;
                }
        }
}
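
/*
 * Flush the mapping of a single user page on the local CPU, using the
 * currently active ASID.
 */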
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        unsigned long flags;

        if (vma->vm_mm) {
                page &= PAGE_MASK;

                local_irq_save(flags);
                local_flush_tlb_one(get_asid(), page);
                local_irq_restore(flags);
        }
}
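
/*
 * Flush all local TLB entries whose EPN falls within [start, end] and
 * whose ASID matches @vma's mm, scanning both the ITLB and the DTLB.
 */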
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        unsigned long flags;
        unsigned long long match, pteh=0, pteh_epn, pteh_low;
        unsigned long tlb;
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm;

        mm = vma->vm_mm;
        if (cpu_context(cpu, mm) == NO_CONTEXT)
                return;

        local_irq_save(flags);

        start &= PAGE_MASK;
        end &= PAGE_MASK;

        match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;

        /* Flush ITLB */
        for_each_itlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                pteh_epn = pteh & PAGE_MASK;
                pteh_low = pteh & ~PAGE_MASK;

                if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
                        __flush_tlb_slot(tlb);
        }

        /* Flush DTLB */
        for_each_dtlb_entry(tlb) {
                asm volatile ("getcfg %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                pteh_epn = pteh & PAGE_MASK;
                pteh_low = pteh & ~PAGE_MASK;

                if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
                        __flush_tlb_slot(tlb);
        }

        local_irq_restore(flags);
}
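
/*
 * Invalidate all of @mm's mappings on this CPU.  Rather than scanning
 * the TLB, the mm's context is dropped; if it is the current mm, a
 * fresh ASID is assigned immediately via activate_context().
 */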
void local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) == NO_CONTEXT)
                return;

        local_irq_save(flags);

        cpu_context(cpu, mm) = NO_CONTEXT;
        if (mm == current->mm)
                activate_context(mm, cpu);

        local_irq_restore(flags);
}

void local_flush_tlb_all(void)
{
        /* Invalidate all, including shared pages, excluding fixed TLBs */
        unsigned long flags, tlb;

        local_irq_save(flags);

        /* Flush each ITLB entry */
        for_each_itlb_entry(tlb)
                __flush_tlb_slot(tlb);

        /* Flush each DTLB entry */
        for_each_dtlb_entry(tlb)
                __flush_tlb_slot(tlb);

        local_irq_restore(flags);
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* FIXME: Optimize this later.. */
        flush_tlb_all();
}
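
/* Global flush: just drop everything via flush_tlb_all(). */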
void __flush_tlb_global(void)
{
        flush_tlb_all();
}
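
/*
 * Nothing to do here: the TLB refill for a freshly faulted-in page is
 * done directly in do_page_fault() via __do_tlb_refill().
 */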
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}