@@ -128,6 +128,15 @@ static void flush_context(unsigned int cpu)
 			asid = 0;
 		} else {
 			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+			/*
+			 * If this CPU has already been through a
+			 * rollover, but hasn't run another task in
+			 * the meantime, we must preserve its reserved
+			 * ASID, as this is the only trace we have of
+			 * the process it is still running.
+			 */
+			if (asid == 0)
+				asid = per_cpu(reserved_asids, i);
 			__set_bit(ASID_TO_IDX(asid), asid_map);
 		}
 		per_cpu(reserved_asids, i) = asid;
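
For readers who want to see the fallback in isolation, the sketch below is a minimal, single-threaded user-space model of the per-CPU loop this hunk modifies. The array sizes, names such as flush_context_model and asid_map, and the plain swap standing in for atomic64_xchg() are assumptions for illustration only, not the kernel's actual definitions.

/*
 * Simplified model of the rollover loop: on rollover, each other CPU's
 * active ASID is claimed; a CPU that has already rolled over (active == 0)
 * falls back to its reserved ASID so the running task is not lost.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS   4
#define NUM_ASIDS 256

static uint64_t active_asids[NR_CPUS];
static uint64_t reserved_asids[NR_CPUS];
static uint8_t  asid_map[NUM_ASIDS];   /* byte-per-ASID stand-in for the bitmap */

static void flush_context_model(unsigned int cpu)
{
	for (unsigned int i = 0; i < NR_CPUS; i++) {
		uint64_t asid;

		if (i == cpu) {
			/* The rolling-over CPU will get a fresh ASID later. */
			asid = 0;
		} else {
			/* Claim the CPU's current ASID; the kernel does this
			 * atomically, a plain swap suffices in this model. */
			asid = active_asids[i];
			active_asids[i] = 0;

			/*
			 * If this CPU already rolled over but has not run
			 * another task since, its active ASID is 0 and the
			 * reserved ASID is the only trace of the task.
			 */
			if (asid == 0)
				asid = reserved_asids[i];

			asid_map[asid % NUM_ASIDS] = 1;  /* keep it reserved */
		}
		reserved_asids[i] = asid;
	}
}

int main(void)
{
	active_asids[1] = 42;     /* CPU1 is running a task           */
	reserved_asids[2] = 7;    /* CPU2 rolled over earlier, idle   */

	flush_context_model(0);   /* CPU0 triggers a rollover         */

	printf("reserved: cpu1=%llu cpu2=%llu\n",
	       (unsigned long long)reserved_asids[1],
	       (unsigned long long)reserved_asids[2]);
	return 0;
}

In the model, CPU2's reserved ASID (7) survives the second rollover even though its active ASID is 0, which is exactly the case the added comment and if (asid == 0) fallback in the patch address.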