@@ -106,15 +106,21 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
+	extern void kvm_local_flush_tlb_all(void);
 	unsigned long asid = asid_cache(cpu);
 
 	if (! ((asid += ASID_INC) & ASID_MASK) ) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
+#ifdef CONFIG_VIRTUALIZATION
+		kvm_local_flush_tlb_all();      /* start new asid cycle */
+#else
 		local_flush_tlb_all();	/* start new asid cycle */
+#endif
 		if (!asid)		/* fix version if needed */
 			asid = ASID_FIRST_VERSION;
 	}
+
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
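
The hunk routes the flush on ASID exhaustion through a KVM-aware variant: when the incremented ASID wraps past ASID_MASK, the hardware ASID space is used up, the TLB is flushed, and the upper bits of the counter act as a generation ("version") number so stale contexts remain distinguishable. The standalone sketch below models only that wraparound arithmetic; it is not the kernel code. The constants assume the common 8-bit MIPS ASID layout (ASID_MASK 0xff, ASID_FIRST_VERSION 0x100), and model_flush_tlb_all() is a hypothetical stub standing in for local_flush_tlb_all()/kvm_local_flush_tlb_all().

/*
 * Minimal model of the ASID allocation in get_new_mmu_context() above.
 * Constants are assumptions for illustration; real values are CPU-dependent.
 */
#include <stdio.h>

#define ASID_INC           0x1UL
#define ASID_MASK          0xffUL   /* low bits: hardware ASID */
#define ASID_FIRST_VERSION 0x100UL  /* upper bits: generation counter */

static unsigned long asid_cache = ASID_FIRST_VERSION;

static void model_flush_tlb_all(void)
{
	/* Stub for the real TLB flush done at each new asid cycle. */
	printf("-- TLB flushed, new asid cycle --\n");
}

static unsigned long get_new_asid(void)
{
	unsigned long asid = asid_cache;

	if (!((asid += ASID_INC) & ASID_MASK)) {
		model_flush_tlb_all();          /* start new asid cycle */
		if (!asid)                      /* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}
	return asid_cache = asid;
}

int main(void)
{
	/* Allocate enough ASIDs to cross one cycle boundary: the hardware
	 * ASID wraps to 0 and the version bits advance instead. */
	for (int i = 0; i < 258; i++) {
		unsigned long asid = get_new_asid();
		if (i < 2 || i > 253)
			printf("alloc %3d: asid=%#05lx (hw=%#04lx, version=%#lx)\n",
			       i, asid, asid & ASID_MASK, asid & ~ASID_MASK);
	}
	return 0;
}

Because the TLB is flushed whenever the hardware ASID field rolls over, reusing hardware ASID 0 in the next generation is safe; the `if (!asid)` check only guards the rare case where the full counter itself wraps to zero.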