
cpumask: use mm_cpumask() wrapper: cris

Makes code futureproof against the impending change to mm->cpu_vm_mask.

It's also a chance to use the new cpumask_ ops which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Rusty Russell authored 16 years ago
commit b9d65c0477
2 changed files with 4 additions and 4 deletions:
  1. arch/cris/arch-v32/kernel/smp.c (3 additions, 3 deletions)
  2. arch/cris/arch-v32/mm/tlb.c (1 addition, 1 deletion)

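The hunks below replace direct accesses to mm->cpu_vm_mask with the mm_cpumask() accessor and the pointer-taking cpumask_*() ops. As a rough illustration of why the accessor future-proofs callers, here is a minimal userspace sketch of the pattern, not kernel code: the names mirror the kernel API (mm_cpumask(), cpumask_set_cpu(), cpumask_clear()), but the types and helpers are simplified stand-ins for the real <linux/cpumask.h> implementation.

/*
 * Minimal userspace sketch of the mm_cpumask() accessor pattern.
 * Simplified stand-ins only; not the kernel's cpumask implementation.
 */
#include <limits.h>
#include <stdio.h>
#include <string.h>

#define NR_CPUS 64
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* Stand-in for struct cpumask: one bit per CPU. */
struct cpumask {
	unsigned long bits[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
};

/* Stand-in for struct mm_struct; only the field this example needs. */
struct mm_struct {
	struct cpumask cpu_vm_mask;	/* layout may change; see mm_cpumask() */
};

/*
 * The wrapper: callers get a pointer to the mask instead of touching
 * mm->cpu_vm_mask directly, so the field can later move or become
 * separately allocated without editing every call site.
 */
static inline struct cpumask *mm_cpumask(struct mm_struct *mm)
{
	return &mm->cpu_vm_mask;
}

/* Pointer-taking ops, in the spirit of cpumask_set_cpu()/cpumask_clear(). */
static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	dstp->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static inline void cpumask_clear(struct cpumask *dstp)
{
	memset(dstp->bits, 0, sizeof(dstp->bits));
}

int main(void)
{
	struct mm_struct mm;

	memset(&mm, 0, sizeof(mm));

	/* Mirrors the flush_tlb_mm() hunk: clear the mask, then mark this CPU. */
	cpumask_clear(mm_cpumask(&mm));
	cpumask_set_cpu(0, mm_cpumask(&mm));

	printf("cpu 0 bit set: %lu\n", mm.cpu_vm_mask.bits[0] & 1UL);
	return 0;
}
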
arch/cris/arch-v32/kernel/smp.c (+3 -3)

@@ -232,7 +232,7 @@ void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned
 	cpumask_t cpu_mask;
 
 	spin_lock_irqsave(&tlbstate_lock, flags);
-	cpu_mask = (mm == FLUSH_ALL ? CPU_MASK_ALL : mm->cpu_vm_mask);
+	cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
 	cpu_clear(smp_processor_id(), cpu_mask);
 	flush_mm = mm;
 	flush_vma = vma;
@@ -252,8 +252,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 	__flush_tlb_mm(mm);
 	flush_tlb_common(mm, FLUSH_ALL, 0);
 	/* No more mappings in other CPUs */
-	cpus_clear(mm->cpu_vm_mask);
-	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+	cpumask_clear(mm_cpumask(mm));
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 }
 
 void flush_tlb_page(struct vm_area_struct *vma,

arch/cris/arch-v32/mm/tlb.c (+1 -1)

@@ -185,7 +185,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		/* Make sure there is a MMU context. */
 		spin_lock(&mmu_context_lock);
 		get_mmu_context(next);
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 		spin_unlock(&mmu_context_lock);
 
 		/*