|
@@ -21,20 +21,28 @@
|
|
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
|
|
-#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm)
|
|
|
+#define tlb_flush(tlb) \
|
|
|
+do { \
|
|
|
+ if (tlb->fullmm) \
|
|
|
+ flush_tlb_mm((tlb)->mm); \
|
|
|
+} while (0)
|
|
|
|
|
|
/*
|
|
|
* This pair is called at time of munmap/exit to flush cache and TLB entries
|
|
|
* for mappings being torn down.
|
|
|
* 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now)
|
|
|
* as we don't support aliasing configs in our VIPT D$.
|
|
|
- * 2) tlb-flush part - implemted via tlb_end_vma( ) can be NOP as well-
|
|
|
- * albiet for difft reasons - its better handled by moving to new ASID
|
|
|
+ * 2) tlb-flush part - implemented via tlb_end_vma( ) flushes the TLB range
|
|
|
*
|
|
|
* Note, read http://lkml.org/lkml/2004/1/15/6
|
|
|
*/
|
|
|
#define tlb_start_vma(tlb, vma)
|
|
|
-#define tlb_end_vma(tlb, vma)
|
|
|
+
|
|
|
+#define tlb_end_vma(tlb, vma) \
|
|
|
+do { \
|
|
|
+ if (!tlb->fullmm) \
|
|
|
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
|
|
|
+} while (0)
|
|
|
|
|
|
#define __tlb_remove_tlb_entry(tlb, ptep, address)
|
|
|
|