
ARC: [ASID] get_new_mmu_context() to conditionally allocate new ASID

ASID allocation changes/1

This patch does 2 things:

(1) get_new_mmu_context() NOW moves mm->ASID to a new value ONLY if it
    was from a prev allocation cycle/generation OR if mm had no ASID
    allocated (vs. before, when it would unconditionally move to a new ASID)

    Callers desiring an unconditional ASID update, e.g. local_flush_tlb_mm()
    (invalidating the parent's address space at fork), first need to force
    the parent to an unallocated ASID (see the caller-contract sketch at
    the end of this page).

(2) get_new_mmu_context() always programs the MMU PID register, whether
    the ASID value is unchanged or new.

The gains are:
- consolidates all ASID alloc logic into get_new_mmu_context()
  (a simplified sketch of the resulting flow follows the change summary below)
- avoids code duplication in switch_mm() for setting the PID register
- enables the future folding of activate_mm() into switch_mm()

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Vineet Gupta, 12 years ago
parent commit 3daa48d1d9
2 changed files with 25 additions and 33 deletions:
  1. arch/arc/include/asm/mmu_context.h (+18, -27)
  2. arch/arc/mm/tlb.c (+7, -6)
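
Before the diffs, here is a minimal, self-contained C sketch of the reworked
flow, distilled from the mmu_context.h hunk below. It borrows the kernel's
names (asid_cache, asid_mm_map, and NO_ASID = 256, per the removed switch_mm()
comment), but it is a simplification: interrupt masking is dropped, the
rollover bounds are hard-coded, and mmu_write_pid()/flush_tlb_all() are
illustrative stubs standing in for the ARC_REG_PID write and the real TLB
flush.

#include <stddef.h>

#define NUM_ASID 256                     /* 8-bit h/w PID: valid ASIDs 0..255 */
#define NO_ASID  NUM_ASID                /* "unallocated": > any valid ASID */

struct mm_struct { unsigned int asid; }; /* stand-in for mm->context.asid */

static struct mm_struct *asid_mm_map[NUM_ASID + 1]; /* reverse map: ASID -> mm */
static unsigned int asid_cache = NUM_ASID - 1;      /* last ASID handed out */

static void mmu_write_pid(unsigned int asid) { (void)asid; /* PID reg write elided */ }
static void flush_tlb_all(void)              { /* full TLB flush elided */ }

static void get_new_mmu_context(struct mm_struct *mm)
{
	/* (1) ASID is from the current alloc cycle: nothing to allocate */
	if (mm->asid <= asid_cache)
		goto set_hw;

	/* relinquish the currently owned ASID, if any */
	if (mm->asid != NO_ASID)
		asid_mm_map[mm->asid] = NULL;

	/* move to the next ASID; on rollover start a new cycle, flush TLB */
	if (++asid_cache >= NUM_ASID) {
		asid_cache = 0;
		flush_tlb_all();
	}

	/* steal the slot from any task still holding a prev-cycle ASID */
	if (asid_mm_map[asid_cache])
		asid_mm_map[asid_cache]->asid = NO_ASID;

	asid_mm_map[asid_cache] = mm;
	mm->asid = asid_cache;

set_hw:
	/* (2) always program the MMU PID reg, changed or not */
	mmu_write_pid(mm->asid);
}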

arch/arc/include/asm/mmu_context.h (+18, -27)

@@ -69,8 +69,8 @@ extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
 extern int asid_cache;
 
 /*
- * Assign a new ASID to task. If the task already has an ASID, it is
- * relinquished.
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
@@ -79,6 +79,17 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 
 	local_irq_save(flags);
 
+	/*
+	 * Move to new ASID if it was not from current alloc-cycle/generation.
+	 *
+	 * Note: Callers needing new ASID unconditionally, independent of
+	 * 	 generation, e.g. local_flush_tlb_mm() for forking  parent,
+	 * 	 first need to destroy the context, setting it to invalid
+	 * 	 value.
+	 */
+	if (mm->context.asid <= asid_cache)
+		goto set_hw;
+
 	/*
 	 * Relinquish the currently owned ASID (if any).
 	 * Doing unconditionally saves a cmp-n-branch; for already unused
@@ -99,9 +110,9 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	 * task with ASID from prev allocation cycle (before ASID roll-over).
 	 *
 	 * This might look wrong - if we are re-using some other task's ASID,
-	 * won't we use it's stale TLB entries too. Actually switch_mm( ) takes
+	 * won't we use it's stale TLB entries too. Actually the algorithm takes
 	 * care of such a case: it ensures that task with ASID from prev alloc
-	 * cycle, when scheduled will refresh it's ASID: see switch_mm( ) below
+	 * cycle, when scheduled will refresh it's ASID
 	 * The stealing scenario described here will only happen if that task
 	 * didn't get a chance to refresh it's ASID - implying stale entries
 	 * won't exist.
@@ -114,7 +125,8 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	asid_mm_map[asid_cache] = mm;
 	mm->context.asid = asid_cache;
 
-	write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+set_hw:
+	write_aux_reg(ARC_REG_PID, mm->context.asid | MMU_ENABLE);
 
 	local_irq_restore(flags);
 }
@@ -141,28 +153,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
 #endif
 
-	/*
-	 * Get a new ASID if task doesn't have a valid one. Possible when
-	 *  -task never had an ASID (fresh after fork)
-	 *  -it's ASID was stolen - past an ASID roll-over.
-	 *  -There's a third obscure scenario (if this task is running for the
-	 *   first time afer an ASID rollover), where despite having a valid
-	 *   ASID, we force a get for new ASID - see comments at top.
-	 *
-	 * Both the non-alloc scenario and first-use-after-rollover can be
-	 * detected using the single condition below:  NO_ASID = 256
-	 * while asid_cache is always a valid ASID value (0-255).
-	 */
-	if (next->context.asid > asid_cache) {
-		get_new_mmu_context(next);
-	} else {
-		/*
-		 * XXX: This will never happen given the chks above
-		 * BUG_ON(next->context.asid > MAX_ASID);
-		 */
-		write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
-	}
-
+	get_new_mmu_context(next);
 }
 
 static inline void destroy_context(struct mm_struct *mm)

arch/arc/mm/tlb.c (+7, -6)

@@ -258,13 +258,14 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
 		return;
 
 	/*
-	 * Workaround for Android weirdism:
-	 * A binder VMA could end up in a task such that vma->mm != tsk->mm
-	 * old code would cause h/w - s/w ASID to get out of sync
+	 * - Move to a new ASID, but only if the mm is still wired in
+	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
+	 *    causing h/w - s/w ASID to get out of sync)
+	 * - Also get_new_mmu_context() new implementation allocates a new
+	 *   ASID only if it is not allocated already - so unallocate first
 	 */
-	if (current->mm != mm)
-		destroy_context(mm);
-	else
+	destroy_context(mm);
+	if (current->mm == mm)
 		get_new_mmu_context(mm);
 }