@@ -141,3 +141,131 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
 
 	local_irq_restore(flags);
 }
+
+#ifdef CONFIG_DEBUG_TLB_SANITY
+
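+/*
+ * Walk the current task's page tables and return the raw PTE value that
+ * maps vaddr, or 0 if there is no valid mapping at any level.
+ */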
+static unsigned get_pte_for_vaddr(unsigned vaddr)
+{
+	struct task_struct *task = get_current();
+	struct mm_struct *mm = task->mm;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+
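+	/* Kernel threads have no mm; use the mm borrowed via active_mm. */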
+	if (!mm)
+		mm = task->active_mm;
+	pgd = pgd_offset(mm, vaddr);
+	if (pgd_none_or_clear_bad(pgd))
+		return 0;
+	pmd = pmd_offset(pgd, vaddr);
+	if (pmd_none_or_clear_bad(pmd))
+		return 0;
+	pte = pte_offset_map(pmd, vaddr);
+	if (!pte)
+		return 0;
+	return pte_val(*pte);
+}
+
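+/* Result flags: suspicious entries only warn, insane entries are fatal. */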
+enum {
+	TLB_SUSPICIOUS	= 1,
+	TLB_INSANE	= 2,
+};
+
+static void tlb_insane(void)
+{
+	BUG_ON(1);
+}
+
+static void tlb_suspicious(void)
+{
+	WARN_ON(1);
+}
+
+/*
+ * Check that TLB entries with the kernel ASID (1) have kernel VMAs
+ * (>= TASK_SIZE), and entries with user ASIDs (>= 4) have VMAs < TASK_SIZE.
+ *
+ * Check that valid TLB entries either have the same PA as the PTE, or the
+ * PTE is marked non-present. A non-present PTE for a page with non-zero
+ * refcount and zero mapcount is normal for a batched TLB flush. A zero
+ * refcount means the page was freed prematurely; a non-zero mapcount is
+ * unusual but not necessarily an error, so it is only marked suspicious.
+ */
+static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
+{
+	unsigned tlbidx = w | (e << PAGE_SHIFT);
+	unsigned r0 = dtlb ?
+		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
+	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
+	unsigned pte = get_pte_for_vaddr(vpn);
+	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
+	unsigned tlb_asid = r0 & ASID_MASK;
+	bool kernel = tlb_asid == 1;
+	int rc = 0;
+
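+	/*
+	 * A kernel-ASID entry must map a kernel VPN, and a user-ASID entry
+	 * a user VPN; any other combination is broken.
+	 */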
+	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
+		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
+		       dtlb ? 'D' : 'I', w, e, vpn,
+		       kernel ? "kernel" : "user");
+		rc |= TLB_INSANE;
+	}
+
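+	/*
+	 * The PA can only be checked against the page tables of the current
+	 * mm, i.e. for entries tagged with the current ASID.
+	 */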
+	if (tlb_asid == mm_asid) {
+		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
+			read_itlb_translation(tlbidx);
+		if ((pte ^ r1) & PAGE_MASK) {
+			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
+			       dtlb ? 'D' : 'I', w, e, r0, r1, pte);
+			if (pte == 0 || !pte_present(__pte(pte))) {
+				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
+				pr_err("page refcount: %d, mapcount: %d\n",
+				       page_count(p),
+				       page_mapcount(p));
+				if (!page_count(p))
+					rc |= TLB_INSANE;
+				else if (page_mapped(p))
+					rc |= TLB_SUSPICIOUS;
+			} else {
+				rc |= TLB_INSANE;
+			}
+		}
+	}
+	return rc;
+}
+
+void check_tlb_sanity(void)
+{
+	unsigned long flags;
+	unsigned w, e;
+	int bug = 0;
+
+	local_irq_save(flags);
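+	/* Scan all ways/entries of the auto-refill DTLB, then the ITLB. */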
+	for (w = 0; w < DTLB_ARF_WAYS; ++w)
+		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
+			bug |= check_tlb_entry(w, e, true);
+	for (w = 0; w < ITLB_ARF_WAYS; ++w)
+		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
+			bug |= check_tlb_entry(w, e, false);
+	if (bug & TLB_INSANE)
+		tlb_insane();
+	if (bug & TLB_SUSPICIOUS)
+		tlb_suspicious();
+	local_irq_restore(flags);
+}
+
+#endif /* CONFIG_DEBUG_TLB_SANITY */