@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+		if (cache_is_vivt())
+			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However, unmapped pages may still be cached with a VIPT cache, and
+ * it is unfortunately not possible to perform cache maintenance on them
+ * using physical addresses. So we have no choice but to set up a
+ * temporary virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupts disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed. If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allows it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	if (!in_interrupt())
+		preempt_disable();
+
+	raw_local_irq_save(flags);
+	(*depth)++;
+	if (pte_val(*ptep) == pte_val(pte)) {
+		*saved_pte = pte;
+	} else {
+		*saved_pte = *ptep;
+		set_pte_ext(ptep, pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	BUG_ON(pte_val(*ptep) != pte_val(pte));
+	BUG_ON(*depth <= 0);
+
+	raw_local_irq_save(flags);
+	(*depth)--;
+	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+		set_pte_ext(ptep, saved_pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	if (!in_interrupt())
+		preempt_enable();
+}
+
+#endif /* CONFIG_CPU_CACHE_VIPT */
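
For illustration, a minimal sketch of how a caller might use this pair. The helper name example_flush_highmem_page() is hypothetical and not part of the patch; it assumes the existing ARM primitives cache_is_vipt(), __cpuc_flush_dcache_area() and PAGE_SIZE, and shows the intended save/restore discipline for the per-CPU fixmap slot:

	/*
	 * Hypothetical caller (not part of this patch): perform D-cache
	 * maintenance on an otherwise unmapped highmem page by giving it
	 * a temporary L1-VIPT fixmap mapping first.
	 */
	static void example_flush_highmem_page(struct page *page)
	{
		pte_t saved_pte;
		void *addr;

		if (!cache_is_vipt())
			return;	/* VIVT pages are flushed before unmap */

		/* Map the page at this CPU's KM_L1_CACHE fixmap slot. */
		addr = kmap_high_l1_vipt(page, &saved_pte);

		/* The page can now be maintained by virtual address. */
		__cpuc_flush_dcache_area(addr, PAGE_SIZE);

		/*
		 * Hand back exactly the PTE we displaced, so a nested
		 * (interrupted) user of the same slot is restored.
		 */
		kunmap_high_l1_vipt(page, saved_pte);
	}

Note the saved_pte round trip: because the pair may nest from interrupt context, kunmap_high_l1_vipt() restores the entry it displaced only while the reentrancy depth remains non-zero after the decrement; at depth 0 the mapping is deliberately left in place so a subsequent map of the same page avoids a TLB flush (the common case with DMA).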