@@ -34,39 +34,22 @@
  * When it reaches max 255, the allocation cycle starts afresh by flushing
  * the entire TLB and wrapping ASID back to zero.
  *
- * For book-keeping, Linux uses a couple of data-structures:
- *  -mm_struct has an @asid field to keep a note of task's ASID (needed at the
- *   time of say switch_mm( )
- *  -An array of mm structs @asid_mm_map[] for asid->mm the reverse mapping,
- *   given an ASID, finding the mm struct associated.
- *
- * The round-robin allocation algorithm allows for ASID stealing.
- * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
- * already assigned to another (switched-out) task. Obviously the prev owner
- * is marked with an invalid ASID to make it request for a new ASID when it
- * gets scheduled next time. However its TLB entries (with ASID "x") could
- * exist, which must be cleared before the same ASID is used by the new owner.
- * Flushing them would be plausible but costly solution. Instead we force a
- * allocation policy quirk, which ensures that a stolen ASID won't have any
- * TLB entries associates, alleviating the need to flush.
- * The quirk essentially is not allowing ASID allocated in prev cycle
- * to be used past a roll-over in the next cycle.
- * When this happens (i.e. task ASID > asid tracker), task needs to refresh
- * its ASID, aligning it to current value of tracker. If the task doesn't get
- * scheduled past a roll-over, hence its ASID is not yet realigned with
- * tracker, such ASID is anyways safely reusable because it is
- * gauranteed that TLB entries with that ASID wont exist.
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task. Thus the rule is to refresh the ASID in a new cycle.
+ * The 32 bit @asid_cache (and mm->asid) hold the 8 bit MMU PID in the lower
+ * bits and the cycle/generation in the upper 24; natural 32 bit unsigned math
+ * automagically increments the generation when the lower 8 bits roll over.
  */
 
-#define FIRST_ASID              0
-#define MAX_ASID                255     /* 8 bit PID field in PID Aux reg */
-#define NO_ASID                 (MAX_ASID + 1) /* ASID Not alloc to mmu ctxt */
-#define NUM_ASID                ((MAX_ASID - FIRST_ASID) + 1)
+#define MM_CTXT_ASID_MASK       0x000000ff /* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK      (~MM_CTXT_ASID_MASK)
+
+#define MM_CTXT_FIRST_CYCLE     (MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID         0UL
 
-/* ASID to mm struct mapping */
-extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+#define hw_pid(mm)              (mm->context.asid & MM_CTXT_ASID_MASK)
 
-extern int asid_cache;
+extern unsigned int asid_cache;
 
 /*
  * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
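To make the cycle/generation arithmetic described in the new comment block concrete, here is a small stand-alone user-space sketch of the same bit layout. The ASID_MASK/CYCLE_MASK/FIRST_CYCLE names are illustrative stand-ins for the MM_CTXT_* macros above; none of this is part of the patch.

#include <assert.h>
#include <stdio.h>

#define ASID_MASK   0x000000ffU     /* 8 bit hardware PID (cf. MM_CTXT_ASID_MASK) */
#define CYCLE_MASK  (~ASID_MASK)    /* upper 24 bits = generation */
#define FIRST_CYCLE (ASID_MASK + 1) /* 0x100: first valid generation */

int main(void)
{
        unsigned int asid_cache = FIRST_CYCLE; /* models the allocator counter */
        unsigned int mm_asid;

        /* "allocate": bump the 32 bit counter; low 8 bits are the HW PID */
        mm_asid = ++asid_cache;

        /* same generation => the task's ASID is still valid */
        assert(!((mm_asid ^ asid_cache) & CYCLE_MASK));

        /* advance the allocator past a rollover of the low 8 bits */
        asid_cache = (asid_cache | ASID_MASK) + 1;

        /* different generation => the task must be handed a fresh ASID */
        assert((mm_asid ^ asid_cache) & CYCLE_MASK);

        printf("task asid %#x, allocator now at %#x\n", mm_asid, asid_cache);
        return 0;
}

Compiled with a stock C compiler, both assertions hold: an ASID from the current generation matches the allocator in the upper 24 bits, one from an earlier generation does not.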
@@ -74,59 +57,42 @@ extern int asid_cache;
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
-        struct mm_struct *prev_owner;
         unsigned long flags;
 
         local_irq_save(flags);
 
         /*
          * Move to new ASID if it was not from current alloc-cycle/generation.
+         * This is done by ensuring that the generation bits in both mm->ASID
+         * and cpu's ASID counter are exactly the same.
          *
          * Note: Callers needing new ASID unconditionally, independent of
          *       generation, e.g. local_flush_tlb_mm() for forking parent,
          *       first need to destroy the context, setting it to invalid
          *       value.
          */
-        if (mm->context.asid <= asid_cache)
+        if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
                 goto set_hw;
 
-        /*
-         * Relinquish the currently owned ASID (if any).
-         * Doing unconditionally saves a cmp-n-branch; for already unused
-         * ASID slot, the value was/remains NULL
-         */
-        asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+        /* move to new ASID and handle rollover */
+        if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {
 
-        /* move to new ASID */
-        if (++asid_cache > MAX_ASID) {  /* ASID roll-over */
-                asid_cache = FIRST_ASID;
                 flush_tlb_all();
-        }
 
-        /*
-         * Is next ASID already owned by some-one else (we are stealing it).
-         * If so, let the orig owner be aware of this, so when it runs, it
-         * asks for a brand new ASID. This would only happen for a long-lived
-         * task with ASID from prev allocation cycle (before ASID roll-over).
-         *
-         * This might look wrong - if we are re-using some other task's ASID,
-         * won't we use it's stale TLB entries too. Actually the algorithm takes
-         * care of such a case: it ensures that task with ASID from prev alloc
-         * cycle, when scheduled will refresh it's ASID
-         * The stealing scenario described here will only happen if that task
-         * didn't get a chance to refresh it's ASID - implying stale entries
-         * won't exist.
-         */
-        prev_owner = asid_mm_map[asid_cache];
-        if (prev_owner)
-                prev_owner->context.asid = NO_ASID;
+                /*
+                 * Above check is for rollover of 8 bit ASID in 32 bit container.
+                 * If the container itself wrapped around, set it to a non-zero
+                 * "generation" to distinguish from no context
+                 */
+                if (!asid_cache)
+                        asid_cache = MM_CTXT_FIRST_CYCLE;
+        }
 
         /* Assign new ASID to tsk */
-        asid_mm_map[asid_cache] = mm;
         mm->context.asid = asid_cache;
 
 set_hw:
-        write_aux_reg(ARC_REG_PID, mm->context.asid | MMU_ENABLE);
+        write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);
 
         local_irq_restore(flags);
 }
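The allocation slow path above can likewise be modelled outside the kernel. The sketch below is only an approximation, under the assumption that a single CPU updates asid_cache with interrupts disabled; get_ctx() and flush_tlb_all_stub() are made-up stand-ins for get_new_mmu_context() and flush_tlb_all(), not kernel interfaces.

#include <stdio.h>

#define ASID_MASK   0x000000ffU
#define CYCLE_MASK  (~ASID_MASK)
#define FIRST_CYCLE (ASID_MASK + 1)
#define NO_ASID     0U

static unsigned int asid_cache = FIRST_CYCLE;

static void flush_tlb_all_stub(void)
{
        puts("TLB flushed: low 8 bits rolled over");
}

/* user-space model of the allocation step: returns the (possibly new) ASID */
static unsigned int get_ctx(unsigned int mm_asid)
{
        /* fast path: ASID is from the current generation, keep it */
        if (!((mm_asid ^ asid_cache) & CYCLE_MASK))
                return mm_asid;

        /* slow path: move to a new ASID, handling 8 bit rollover */
        if (!(++asid_cache & ASID_MASK)) {
                flush_tlb_all_stub();

                /* full 32 bit wraparound: skip 0 so it never aliases NO_ASID */
                if (!asid_cache)
                        asid_cache = FIRST_CYCLE;
        }

        return asid_cache;
}

int main(void)
{
        unsigned int a = get_ctx(NO_ASID);      /* a new mm is always refreshed */

        printf("assigned %#x (hw pid %#x)\n", a, a & ASID_MASK);
        return 0;
}

The fast path costs one XOR and one AND; the 8 bit rollover case pays for a full TLB flush, and the rare full 32 bit wraparound re-seeds the counter so a live generation never aliases NO_ASID.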
@@ -138,7 +104,7 @@ set_hw:
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-        mm->context.asid = NO_ASID;
+        mm->context.asid = MM_CTXT_NO_ASID;
         return 0;
 }
 
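A side note on why MM_CTXT_NO_ASID can simply be 0UL: asid_cache is seeded to MM_CTXT_FIRST_CYCLE and, per the previous hunk, re-seeded there on full 32 bit wraparound, so its generation bits are never all zero. A freshly initialised mm therefore always fails the generation check and gets a real ASID on first use. A minimal user-space check of that invariant (names are illustrative stand-ins, not kernel code):

#include <assert.h>

#define ASID_MASK   0x000000ffU
#define CYCLE_MASK  (~ASID_MASK)
#define FIRST_CYCLE (ASID_MASK + 1)
#define NO_ASID     0U

int main(void)
{
        /* the allocator starts at (and wraps back to) FIRST_CYCLE, so its
         * generation bits are never all zero ... */
        unsigned int asid_cache = FIRST_CYCLE;

        /* ... hence an mm initialised to NO_ASID can never pass the
         * generation check and is always pushed through allocation */
        assert((NO_ASID ^ asid_cache) & CYCLE_MASK);
        return 0;
}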
@@ -167,14 +133,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 static inline void destroy_context(struct mm_struct *mm)
 {
-        unsigned long flags;
-
-        local_irq_save(flags);
-
-        asid_mm_map[mm->context.asid] = NULL;
-        mm->context.asid = NO_ASID;
-
-        local_irq_restore(flags);
+        mm->context.asid = MM_CTXT_NO_ASID;
 }
 
 /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping