@@ -34,13 +34,13 @@ void flush_tlb_mm(struct mm_struct *mm)
 {
 	HV_Remote_ASID asids[NR_CPUS];
 	int i = 0, cpu;
-	for_each_cpu(cpu, &mm->cpu_vm_mask) {
+	for_each_cpu(cpu, mm_cpumask(mm)) {
 		HV_Remote_ASID *asid = &asids[i++];
 		asid->y = cpu / smp_topology.width;
 		asid->x = cpu % smp_topology.width;
 		asid->asid = per_cpu(current_asid, cpu);
 	}
-	flush_remote(0, HV_FLUSH_EVICT_L1I, &mm->cpu_vm_mask,
+	flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
 		     0, 0, 0, NULL, asids, i);
 }

@@ -54,8 +54,8 @@ void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm,
 {
 	unsigned long size = hv_page_size(vma);
 	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
-	flush_remote(0, cache, &mm->cpu_vm_mask,
-		     va, size, size, &mm->cpu_vm_mask, NULL, 0);
+	flush_remote(0, cache, mm_cpumask(mm),
+		     va, size, size, mm_cpumask(mm), NULL, 0);
 }

 void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va)
@@ -70,8 +70,8 @@ void flush_tlb_range(const struct vm_area_struct *vma,
 	unsigned long size = hv_page_size(vma);
 	struct mm_struct *mm = vma->vm_mm;
 	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
-	flush_remote(0, cache, &mm->cpu_vm_mask, start, end - start, size,
-		     &mm->cpu_vm_mask, NULL, 0);
+	flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
+		     mm_cpumask(mm), NULL, 0);
 }

 void flush_tlb_all(void)