@@ -38,22 +38,19 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
 	if (!pte_none(*(kmap_pte-idx)))
 		BUG();
-#endif
 	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
-	__flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
 
 void kunmap_atomic(void *kvaddr, enum km_type type)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
+#ifdef CONFIG_DEBUG_HIGHMEM
 	if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
 		dec_preempt_count();
 		preempt_check_resched();
@@ -62,14 +59,14 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 
 	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
 		BUG();
-
+#endif
 	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
+	 * Force other mappings to Oops if they'll try to access this pte
+	 * without first remap it.  Keeping stale mappings around is a bad idea
+	 * also, in case the page changes cacheability attributes or becomes
+	 * a protected page in a hypervisor.
 	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	__flush_tlb_one(vaddr);
-#endif
+	kpte_clear_flush(kmap_pte-idx, vaddr);
 
 	dec_preempt_count();
 	preempt_check_resched();
@@ -88,7 +85,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
-	__flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
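
For reference, kpte_clear_flush() bundles the pte clear and the single-address TLB flush that kunmap_atomic() used to open-code under CONFIG_DEBUG_HIGHMEM. A minimal sketch of what the native (non-paravirt) definition amounts to, assuming it simply wraps the same two operations removed above; the exact arch-header definition may differ, and a paravirtualized backend can hook it to also drop the mapping in the hypervisor:

/*
 * Sketch only: clear a fixmap kmap pte and flush its TLB entry.
 * On native i386 this is expected to expand to the same
 * pte_clear() + __flush_tlb_one() pair the patch removes from
 * kunmap_atomic(); this is an illustration, not the arch header.
 */
#define kpte_clear_flush(ptep, vaddr)			\
do {							\
	pte_clear(&init_mm, (vaddr), (ptep));		\
	__flush_tlb_one(vaddr);				\
} while (0)

Because kunmap_atomic() now performs this clear-and-flush unconditionally, not only under CONFIG_DEBUG_HIGHMEM, the fixmap slot carries no stale TLB entry when it is next reused, which is presumably why the __flush_tlb_one() calls in kmap_atomic() and kmap_atomic_pfn() can be dropped.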