@@ -95,7 +95,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
	void *vfrom, *vto;
 
-	vto = kmap_atomic(to, KM_USER1);
+	vto = kmap_atomic(to);
 
	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    test_bit(PG_dcache_clean, &from->flags)) {
@@ -103,16 +103,16 @@ void copy_user_highpage(struct page *to, struct page *from,
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
-		vfrom = kmap_atomic(from, KM_USER0);
+		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
-		kunmap_atomic(vfrom, KM_USER0);
+		kunmap_atomic(vfrom);
	}
 
	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
	    (vma->vm_flags & VM_EXEC))
		__flush_purge_region(vto, PAGE_SIZE);
 
-	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
 }
@@ -120,14 +120,14 @@ EXPORT_SYMBOL(copy_user_highpage);
 
 void clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
+	void *kaddr = kmap_atomic(page);
 
	clear_page(kaddr);
 
	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);
 
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 EXPORT_SYMBOL(clear_user_highpage);
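
For reference, a minimal sketch of the calling convention this diff migrates to, assuming only <linux/highmem.h>: kmap_atomic() no longer takes a KM_USERx slot argument because mapping slots are managed internally on a per-CPU stack, and kunmap_atomic() takes the mapped address rather than a slot index. Mappings must therefore be released in reverse (LIFO) order, which the patched copy_user_highpage() already respects (vfrom is unmapped before vto). The helper name below is hypothetical and not part of this patch.

/*
 * Hypothetical example of the post-conversion kmap_atomic() API;
 * a sketch for illustration, not code from this patch.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_buf_to_page_example(struct page *page, const void *buf,
				     size_t len)
{
	/* No KM_USERx slot: the slot is picked from a per-CPU stack. */
	void *kaddr = kmap_atomic(page);

	memcpy(kaddr, buf, len);

	/* Unmap by address, not by slot index. */
	kunmap_atomic(kaddr);
}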