/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
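
/*
 * Invalidate every auto-refill entry in the instruction TLB, one
 * (way, entry) pair at a time.  The index handed to
 * invalidate_itlb_entry_no_isync() packs the way into the low bits and
 * the entry number at PAGE_SHIFT and above; the helper itself comes from
 * asm/tlbflush.h.  Using the no-isync variant lets us issue a single
 * isync after the whole loop instead of one per entry.
 */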
static inline void __flush_itlb_all (void)
{
        int w, i;

        for (w = 0; w < ITLB_ARF_WAYS; w++) {
                for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
                        int e = w + (i << PAGE_SHIFT);
                        invalidate_itlb_entry_no_isync(e);
                }
        }
        asm volatile ("isync\n");
}

static inline void __flush_dtlb_all (void)
{
        int w, i;

        for (w = 0; w < DTLB_ARF_WAYS; w++) {
                for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
                        int e = w + (i << PAGE_SHIFT);
                        invalidate_dtlb_entry_no_isync(e);
                }
        }
        asm volatile ("isync\n");
}

void flush_tlb_all (void)
{
        __flush_itlb_all();
        __flush_dtlb_all();
}

/*
 * If mm is the current task's, we simply assign it a new ASID, thereby
 * invalidating all of its previous TLB entries.  If mm belongs to another
 * task, we invalidate its context, so that a new context is assigned when
 * that user mapping is next switched in.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm) {
                unsigned long flags;
                local_irq_save(flags);
                __get_new_mmu_context(mm);
                __load_mmu_context(mm);
                local_irq_restore(flags);
        } else {
                mm->context = 0;
        }
}

#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
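
/*
 * _TLB_ENTRIES is the capacity of the larger of the two auto-refill TLBs.
 * flush_tlb_range() uses it as a cutoff: when the range spans no more
 * pages than the TLB can hold, it is cheaper to invalidate the pages one
 * by one under the target mm's ASID; a larger range is handled by
 * invalidating the whole context via flush_tlb_mm() instead.
 */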
void flush_tlb_range (struct vm_area_struct *vma,
                      unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long flags;

        if (mm->context == NO_CONTEXT)
                return;

#if 0
        printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
               (unsigned long)mm->context, start, end);
#endif
        local_irq_save(flags);

        if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
                int oldpid = get_rasid_register();
                set_rasid_register(ASID_INSERT(mm->context));
                start &= PAGE_MASK;
                if (vma->vm_flags & VM_EXEC)
                        while (start < end) {
                                invalidate_itlb_mapping(start);
                                invalidate_dtlb_mapping(start);
                                start += PAGE_SIZE;
                        }
                else
                        while (start < end) {
                                invalidate_dtlb_mapping(start);
                                start += PAGE_SIZE;
                        }
                set_rasid_register(oldpid);
        } else {
                flush_tlb_mm(mm);
        }
        local_irq_restore(flags);
}
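
/*
 * Invalidate the TLB entries for a single page.  We temporarily switch
 * the RASID register to the page's mm context (ASID_INSERT() builds the
 * register value; it and the rasid accessors presumably live in
 * asm/mmu_context.h) so that the per-page invalidate operations match
 * the right address space, then restore the previous value.  The ITLB
 * only needs flushing when the VMA is executable.
 */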
void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long flags;
        int oldpid;

        if (mm->context == NO_CONTEXT)
                return;

        local_irq_save(flags);

        oldpid = get_rasid_register();
        set_rasid_register(ASID_INSERT(mm->context));

        if (vma->vm_flags & VM_EXEC)
                invalidate_itlb_mapping(page);
        invalidate_dtlb_mapping(page);

        set_rasid_register(oldpid);
        local_irq_restore(flags);
}

#ifdef CONFIG_DEBUG_TLB_SANITY
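
/*
 * Look up the PTE that maps vaddr in the current task's page tables,
 * returning 0 when no valid mapping exists at any level.  Walking
 * pgd -> pmd -> pte directly is sufficient here because xtensa uses
 * two-level page tables.
 */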
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
        struct task_struct *task = get_current();
        struct mm_struct *mm = task->mm;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        if (!mm)
                mm = task->active_mm;
        pgd = pgd_offset(mm, vaddr);
        if (pgd_none_or_clear_bad(pgd))
                return 0;
        pmd = pmd_offset(pgd, vaddr);
        if (pmd_none_or_clear_bad(pmd))
                return 0;
        pte = pte_offset_map(pmd, vaddr);
        if (!pte)
                return 0;
        return pte_val(*pte);
}
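
/*
 * Severity flags returned by check_tlb_entry() and aggregated by
 * check_tlb_sanity(): INSANE triggers a BUG, SUSPICIOUS only a WARN.
 */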
enum {
        TLB_SUSPICIOUS = 1,
        TLB_INSANE = 2,
};

static void tlb_insane(void)
{
        BUG_ON(1);
}

static void tlb_suspicious(void)
{
        WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>= 4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or that
 * the PTE is marked non-present.  A non-present PTE whose page has a
 * non-zero refcount and a zero mapcount is normal for a batched TLB flush
 * operation.  A zero refcount means the page was freed prematurely.  A
 * non-zero mapcount is unusual, but does not necessarily mean an error,
 * so it is only marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
        unsigned tlbidx = w | (e << PAGE_SHIFT);
        unsigned r0 = dtlb ?
                read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
        unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
        unsigned pte = get_pte_for_vaddr(vpn);
        unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
        unsigned tlb_asid = r0 & ASID_MASK;
        bool kernel = tlb_asid == 1;
        int rc = 0;

        /* Valid entries must pair kernel ASIDs with kernel addresses
         * and user ASIDs with user addresses.
         */
        if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
                pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
                       dtlb ? 'D' : 'I', w, e, vpn,
                       kernel ? "kernel" : "user");
                rc |= TLB_INSANE;
        }

        if (tlb_asid == mm_asid) {
                unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
                        read_itlb_translation(tlbidx);
                if ((pte ^ r1) & PAGE_MASK) {
                        pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
                               dtlb ? 'D' : 'I', w, e, r0, r1, pte);
                        if (pte == 0 || !pte_present(__pte(pte))) {
                                struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
                                pr_err("page refcount: %d, mapcount: %d\n",
                                       page_count(p),
                                       page_mapcount(p));
                                if (!page_count(p))
                                        rc |= TLB_INSANE;
                                else if (page_mapped(p))
                                        rc |= TLB_SUSPICIOUS;
                        } else {
                                rc |= TLB_INSANE;
                        }
                }
        }
        return rc;
}
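
/*
 * Walk every auto-refill way and entry of both TLBs with interrupts
 * disabled, OR the per-entry results together, and escalate at the end:
 * BUG on anything flagged insane, WARN on anything merely suspicious.
 */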
void check_tlb_sanity(void)
{
        unsigned long flags;
        unsigned w, e;
        int bug = 0;

        local_irq_save(flags);
        for (w = 0; w < DTLB_ARF_WAYS; ++w)
                for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
                        bug |= check_tlb_entry(w, e, true);
        for (w = 0; w < ITLB_ARF_WAYS; ++w)
                for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
                        bug |= check_tlb_entry(w, e, false);
        if (bug & TLB_INSANE)
                tlb_insane();
        if (bug & TLB_SUSPICIOUS)
                tlb_suspicious();
        local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */