@@ -33,7 +33,7 @@
  * To avoid global state use 8 different call vectors.
  * Each CPU uses a specific vector to trigger flushes on other
  * CPUs. Depending on the received vector the target CPUs look into
- * the right per cpu variable for the flush data.
+ * the right array slot for the flush data.
  *
  * With more than 8 CPUs they are hashed to the 8 available
  * vectors. The limited global vector space forces us to this right now.
@@ -48,13 +48,13 @@ union smp_flush_state {
 		unsigned long flush_va;
 		spinlock_t tlbstate_lock;
 	};
-	char pad[SMP_CACHE_BYTES];
-} ____cacheline_aligned;
+	char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
+} ____cacheline_internodealigned_in_smp;
 
 /* State is put into the per CPU data section, but padded
    to a full cache line because other CPUs can access it and we don't
    want false sharing in the per cpu data segment. */
-static DEFINE_PER_CPU(union smp_flush_state, flush_state);
+static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
@@ -129,7 +129,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	 * Use that to determine where the sender put the data.
 	 */
 	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
-	f = &per_cpu(flush_state, sender);
+	f = &flush_state[sender];
 
 	if (!cpu_isset(cpu, f->flush_cpumask))
 		goto out;
@@ -169,7 +169,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 
 	/* Caller has disabled preemption */
 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
-	f = &per_cpu(flush_state, sender);
+	f = &flush_state[sender];
 
 	/*
 	 * Could avoid this lock when
@@ -205,8 +205,8 @@ static int __cpuinit init_smp_flush(void)
 {
 	int i;
 
-	for_each_possible_cpu(i)
-		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
+	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
+		spin_lock_init(&flush_state[i].tlbstate_lock);
 
 	return 0;
 }
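
To make the slot arithmetic in the hunks above concrete: the sender hashes its CPU id onto one of the eight invalidate vectors and writes its flush data into the matching array slot, and the receiver recovers the slot index from the vector number it was interrupted with. Below is a minimal standalone userspace sketch of that scheme. The union layout and the modulo hashing mirror the patch; the vector base 0xf0, the 64-byte pad, and the helper names pick_sender_slot()/slot_from_vector() are illustrative stand-ins, not the kernel's actual values.

#include <stdio.h>

#define NUM_INVALIDATE_TLB_VECTORS	8
#define INVALIDATE_TLB_VECTOR_START	0xf0	/* illustrative base; the real value is arch-defined */

/* Mirrors the patch's union: the real flush data is padded out to a
 * full cache line so the eight slots never share a line (no false
 * sharing between concurrent senders). */
union smp_flush_state {
	struct {
		int flush_cpu;	/* stand-in for flush_cpumask/flush_mm/flush_va */
	};
	char pad[64];		/* stand-in for CONFIG_X86_INTERNODE_CACHE_BYTES */
};

static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

/* Sender side: hash the CPU id onto one of the 8 vectors, as
 * native_flush_tlb_others() does with smp_processor_id(). */
static int pick_sender_slot(int cpu)
{
	return cpu % NUM_INVALIDATE_TLB_VECTORS;
}

/* Receiver side: recover the sender's slot from the vector that fired,
 * as smp_invalidate_interrupt() does via ~regs->orig_ax. */
static int slot_from_vector(int vector)
{
	return vector - INVALIDATE_TLB_VECTOR_START;
}

int main(void)
{
	int cpu;

	/* With 12 CPUs, CPUs 0 and 8 share slot 0, CPUs 1 and 9 share slot 1, ... */
	for (cpu = 0; cpu < 12; cpu++) {
		int slot = pick_sender_slot(cpu);
		int vector = INVALIDATE_TLB_VECTOR_START + slot;

		flush_state[slot].flush_cpu = cpu;
		printf("cpu %2d -> slot %d (vector 0x%x), receiver reads slot %d\n",
		       cpu, slot, vector, slot_from_vector(slot + INVALIDATE_TLB_VECTOR_START));
	}
	return 0;
}

The tradeoff the header comment describes falls directly out of the modulo: with more than 8 CPUs, several senders map to the same slot, which is why each slot carries its own tlbstate_lock and why each slot is padded to a full (internode) cache line.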