@@ -7,12 +7,13 @@
  */
 
 /*
- * Routines to manage the allocation of task context numbers. Task context numbers are
- * used to reduce or eliminate the need to perform TLB flushes due to context switches.
- * Context numbers are implemented using ia-64 region ids. Since the IA-64 TLB does not
- * consider the region number when performing a TLB lookup, we need to assign a unique
- * region id to each region in a process. We use the least significant three bits in a
- * region id for this purpose.
+ * Routines to manage the allocation of task context numbers. Task context
+ * numbers are used to reduce or eliminate the need to perform TLB flushes
+ * due to context switches. Context numbers are implemented using ia-64
+ * region ids. Since the IA-64 TLB does not consider the region number when
+ * performing a TLB lookup, we need to assign a unique region id to each
+ * region in a process. We use the least significant three bits in a
+ * region id for this purpose.
  */
 
 #define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */
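
Because the low three bits carry the region number, a single context number
yields eight distinct region ids, one per ia-64 region. A minimal sketch of
that encoding (illustrative only; the real computation lives in
reload_context() in this file, and rid_for_region is a made-up name):

	/*
	 * Sketch: derive the region id for one of the eight ia-64
	 * regions from a task's context number.  The context is shifted
	 * up by 3 so the low bits can carry the region number.
	 */
	static inline unsigned long
	rid_for_region (unsigned long context, unsigned int rgn)	/* rgn: 0..7 */
	{
		unsigned long rid = context << 3;	/* make room for region bits */

		return rid | rgn;	/* unique region id per region */
	}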
@@ -51,10 +52,10 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
 }
 
 /*
- * When the context counter wraps around all TLBs need to be flushed because an old
- * context number might have been reused. This is signalled by the ia64_need_tlb_flush
- * per-CPU variable, which is checked in the routine below. Called by activate_mm().
- * <efocht@ess.nec.de>
+ * When the context counter wraps around all TLBs need to be flushed because
+ * an old context number might have been reused. This is signalled by the
+ * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
+ * below. Called by activate_mm(). <efocht@ess.nec.de>
  */
 static inline void
 delayed_tlb_flush (void)
@@ -64,11 +65,9 @@ delayed_tlb_flush (void)
 
 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
 		spin_lock_irqsave(&ia64_ctx.lock, flags);
-		{
-			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
-				local_flush_tlb_all();
-				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
-			}
+		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+			local_flush_tlb_all();
+			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
 		}
 		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
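
The flag tested above, once locklessly and once again under ia64_ctx.lock,
is set by the wrap path. A hedged sketch of that producer side, modeled on
wrap_mmu_context() in arch/ia64/mm/tlb.c (illustrative, not a verbatim
copy; mark_cpus_for_delayed_flush is a made-up name):

	/*
	 * On context wrap-around: flush this CPU's TLB immediately and
	 * mark every other CPU so its next delayed_tlb_flush() flushes
	 * locally too.
	 */
	static void
	mark_cpus_for_delayed_flush (void)
	{
		int i, cpu = get_cpu();	/* pin: no migration while marking */

		for_each_online_cpu(i)
			if (i != cpu)
				per_cpu(ia64_need_tlb_flush, i) = 1;
		put_cpu();
		local_flush_tlb_all();	/* this CPU flushes right away */
	}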
@@ -80,27 +79,27 @@ get_mmu_context (struct mm_struct *mm)
 	unsigned long flags;
 	nv_mm_context_t context = mm->context;
 
-	if (unlikely(!context)) {
-		spin_lock_irqsave(&ia64_ctx.lock, flags);
-		{
-			/* re-check, now that we've got the lock: */
-			context = mm->context;
-			if (context == 0) {
-				cpus_clear(mm->cpu_vm_mask);
-				if (ia64_ctx.next >= ia64_ctx.limit) {
-					ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
-							ia64_ctx.max_ctx, ia64_ctx.next);
-					ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
-							ia64_ctx.max_ctx, ia64_ctx.next);
-					if (ia64_ctx.next >= ia64_ctx.max_ctx)
-						wrap_mmu_context(mm);
-				}
-				mm->context = context = ia64_ctx.next++;
-				__set_bit(context, ia64_ctx.bitmap);
-			}
+	if (likely(context))
+		goto out;
+
+	spin_lock_irqsave(&ia64_ctx.lock, flags);
+	/* re-check, now that we've got the lock: */
+	context = mm->context;
+	if (context == 0) {
+		cpus_clear(mm->cpu_vm_mask);
+		if (ia64_ctx.next >= ia64_ctx.limit) {
+			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+					ia64_ctx.max_ctx, ia64_ctx.next);
+			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+					ia64_ctx.max_ctx, ia64_ctx.next);
+			if (ia64_ctx.next >= ia64_ctx.max_ctx)
+				wrap_mmu_context(mm);
 		}
-		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+		mm->context = context = ia64_ctx.next++;
+		__set_bit(context, ia64_ctx.bitmap);
 	}
+	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+out:
 	/*
 	 * Ensure we're not starting to use "context" before any old
 	 * uses of it are gone from our TLB.
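
A worked example of the free-range scan in this hunk, with illustrative
numbers:

	/*
	 * Suppose bits 5..8 are set in ia64_ctx.bitmap and next has
	 * reached limit at 5:
	 *
	 *	next  = find_next_zero_bit(bitmap, max_ctx, 5);  // -> 9
	 *	limit = find_next_bit(bitmap, max_ctx, 9);       // -> e.g. 17
	 *
	 * Contexts 9..16 now form a free run that "ia64_ctx.next++"
	 * hands out under the lock without rescanning the bitmap; only
	 * when next reaches max_ctx does wrap_mmu_context() run.
	 */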
@@ -111,8 +110,8 @@ get_mmu_context (struct mm_struct *mm)
 }
 
 /*
- * Initialize context number to some sane value. MM is guaranteed to be a brand-new
- * address-space, so no TLB flushing is needed, ever.
+ * Initialize context number to some sane value. MM is guaranteed to be a
+ * brand-new address-space, so no TLB flushing is needed, ever.
  */
 static inline int
 init_new_context (struct task_struct *p, struct mm_struct *mm)
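
The body is outside this hunk; a sketch under the assumption that it is the
trivial one the comment implies (context 0 means "not yet allocated", as
the get_mmu_context() checks above show):

	static inline int
	init_new_context (struct task_struct *p, struct mm_struct *mm)
	{
		mm->context = 0;	/* unallocated; get_mmu_context() assigns lazily */
		return 0;
	}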
@@ -173,7 +172,10 @@ activate_context (struct mm_struct *mm)
 		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
 			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 		reload_context(context);
-		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
+		/*
+		 * in the unlikely event of a TLB-flush by another thread,
+		 * redo the load.
+		 */
 	} while (unlikely(context != mm->context));
 }
 
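
The retry this comment describes has the following overall shape (sketch;
see activate_context() in this file for the real loop, which also updates
cpu_vm_mask):

	/*
	 * If another thread flushes the TLB and mm->context is
	 * reassigned between get_mmu_context() and reload_context(),
	 * the exit test sees a stale "context" and the region ids are
	 * reloaded with the fresh number.
	 */
	do {
		context = get_mmu_context(mm);
		reload_context(context);
	} while (unlikely(context != mm->context));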
@@ -186,8 +188,8 @@ static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
 	/*
-	 * We may get interrupts here, but that's OK because interrupt handlers cannot
-	 * touch user-space.
+	 * We may get interrupts here, but that's OK because interrupt
+	 * handlers cannot touch user-space.
 	 */
 	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
 	activate_context(next);