@@ -47,18 +47,18 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
-void __check_kvm_seq(struct mm_struct *mm)
+void __check_vmalloc_seq(struct mm_struct *mm)
 {
 	unsigned int seq;
 
 	do {
-		seq = init_mm.context.kvm_seq;
+		seq = init_mm.context.vmalloc_seq;
 		memcpy(pgd_offset(mm, VMALLOC_START),
 		       pgd_offset_k(VMALLOC_START),
 		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
 					pgd_index(VMALLOC_START)));
-		mm->context.kvm_seq = seq;
-	} while (seq != init_mm.context.kvm_seq);
+		mm->context.vmalloc_seq = seq;
+	} while (seq != init_mm.context.vmalloc_seq);
 }
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -89,13 +89,13 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 		if (!pmd_none(pmd)) {
 			/*
 			 * Clear the PMD from the page table, and
-			 * increment the kvm sequence so others
+			 * increment the vmalloc sequence so others
 			 * notice this change.
 			 *
 			 * Note: this is still racy on SMP machines.
 			 */
 			pmd_clear(pmdp);
-			init_mm.context.kvm_seq++;
+			init_mm.context.vmalloc_seq++;
 
 			/*
 			 * Free the page table, if there was one.
@@ -112,8 +112,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 	 * Ensure that the active_mm is up to date - we want to
 	 * catch any use-after-iounmap cases.
 	 */
-	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
-		__check_kvm_seq(current->active_mm);
+	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
+		__check_vmalloc_seq(current->active_mm);
 
 	flush_tlb_kernel_range(virt, end);
 }
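
For readers unfamiliar with the pattern being renamed here: __check_vmalloc_seq() is a
lock-free consumer of a bare sequence counter. It samples init_mm.context.vmalloc_seq,
copies the kernel's vmalloc-range PGD entries into the given mm, and retries if the
counter moved during the copy (unmap_area_sections() may have cleared a PMD and bumped
the counter concurrently). Below is a minimal, self-contained userspace sketch of that
retry shape, under the assumption that plain integers stand in for PGD entries; every
identifier in it (global_seq, master_entries, struct ctx, sync_entries) is a
hypothetical stand-in, not kernel API.

/*
 * Sketch only: the real code copies pgd_t entries between init_mm and
 * the current mm; here plain ints stand in for those entries.
 */
#include <stdio.h>
#include <string.h>

#define NENTRIES 4

static unsigned int global_seq;		/* plays init_mm.context.vmalloc_seq */
static int master_entries[NENTRIES];	/* plays init_mm's vmalloc PGD range */

struct ctx {
	unsigned int seq;		/* plays mm->context.vmalloc_seq */
	int entries[NENTRIES];		/* plays this mm's PGD range */
};

static void sync_entries(struct ctx *c)
{
	unsigned int seq;

	do {
		seq = global_seq;	/* sample the counter first... */
		memcpy(c->entries, master_entries,
		       sizeof(master_entries));	/* ...then copy the shared state */
		c->seq = seq;
		/* ...and retry if a writer bumped the counter meanwhile */
	} while (seq != global_seq);
}

int main(void)
{
	struct ctx c = { 0 };

	master_entries[0] = 42;
	global_seq++;			/* a "writer" invalidates stale copies */

	if (c.seq != global_seq)	/* the cheap check done at iounmap time */
		sync_entries(&c);

	printf("seq=%u entry0=%d\n", c.seq, c.entries[0]);
	return 0;
}

As the in-diff comment notes, the kernel version of this loop is still racy on SMP;
the counter only guarantees that a stale copier notices the change and copies again,
not that copier and clearer are serialized.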