@@ -33,8 +33,6 @@
 
 #include "mmu_decl.h"
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 #ifdef CONFIG_SMP
 
 /*
@@ -43,7 +41,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
  * freeing a page table page that is being walked without locks
  */
 
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 static unsigned long pte_freelist_forced_free;
 
 struct pte_freelist_batch
@@ -97,12 +94,10 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
 
 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 {
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+	struct pte_freelist_batch **batchp = &tlb->arch.batch;
 	unsigned long pgf;
 
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
+	if (atomic_read(&tlb->mm->mm_users) < 2) {
 		pgtable_free(table, shift);
 		return;
 	}
@@ -124,10 +119,9 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 	}
 }
 
-void pte_free_finish(void)
+void pte_free_finish(struct mmu_gather *tlb)
 {
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+	struct pte_freelist_batch **batchp = &tlb->arch.batch;
 
 	if (*batchp == NULL)
 		return;
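
Note: the tlb->arch.batch dereference above assumes an arch-private hook in
the generic struct mmu_gather. A minimal sketch of what the matching
asm/tlb.h side could look like follows; the names HAVE_ARCH_MMU_GATHER,
arch_mmu_gather and ARCH_MMU_GATHER_INIT are assumptions about that header,
not part of this hunk.

	/* Sketch only: assumed arch-private mmu_gather state, not from this diff. */
	#define HAVE_ARCH_MMU_GATHER 1

	struct pte_freelist_batch;

	/* Per-gather (rather than per-cpu) page-table freeing batch. */
	struct arch_mmu_gather {
		struct pte_freelist_batch *batch;
	};

	/* Start each gather with an empty batch. */
	#define ARCH_MMU_GATHER_INIT (struct arch_mmu_gather){ .batch = NULL, }

With the batch carried in the gather itself instead of a per-cpu variable,
pgtable_free_tlb() and pte_free_finish() no longer depend on
tlb_gather_mmu() having disabled preemption, which is why the two
"This is safe..." comments are dropped; callers would now pass the gather
explicitly, e.g. pte_free_finish(tlb) from the arch TLB flush path.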