@@ -203,6 +203,31 @@ static inline void kunmap_coherent(struct page *page)
 	preempt_check_resched();
 }
 
+void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma)
+{
+	void *vfrom, *vto;
+
+	vto = kmap_atomic(to, KM_USER1);
+	if (cpu_has_dc_aliases) {
+		vfrom = kmap_coherent(from, vaddr);
+		copy_page(vto, vfrom);
+		kunmap_coherent(from);
+	} else {
+		vfrom = kmap_atomic(from, KM_USER0);
+		copy_page(vto, vfrom);
+		kunmap_atomic(vfrom, KM_USER0);
+	}
+	if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
+	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+		flush_data_cache_page((unsigned long)vto);
+	kunmap_atomic(vto, KM_USER1);
+	/* Make sure this page is cleared on other CPU's too before using it */
+	smp_wmb();
+}
+
+EXPORT_SYMBOL(copy_user_highpage);
+
 void copy_to_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
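
For readers unfamiliar with the test on the pages_do_alias() line above: on MIPS it is essentially ((addr1 ^ addr2) & shm_align_mask), which is non-zero when two virtual addresses land on different colours of a virtually indexed D-cache. Below is a minimal standalone sketch of that check, assuming a 32 KiB, 2-way D-cache with 4 KiB pages; the constants and addresses are made up for illustration, and only the XOR-and-mask shape matches the kernel helper.

/*
 * Standalone illustration of the alias test (not part of the patch).
 * Assumed geometry: 32 KiB, 2-way virtually indexed D-cache, 4 KiB
 * pages, so one way spans four page colours and shm_align_mask
 * covers address bits 0..13.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

#define DCACHE_SIZE	(32UL * 1024)		/* assumed */
#define DCACHE_WAYS	2UL			/* assumed */
#define WAY_SIZE	(DCACHE_SIZE / DCACHE_WAYS)

static unsigned long shm_align_mask = WAY_SIZE - 1;

/* Same shape as the MIPS helper: non-zero when addr1 and addr2 index
 * different cache lines, i.e. the two mappings can alias. */
static unsigned long pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

int main(void)
{
	/* Hypothetical page-aligned addresses: a kernel-side mapping of
	 * the destination page (vto) and the user virtual address the
	 * page will appear at (vaddr). */
	unsigned long vto = 0x9000a000UL;
	unsigned long vaddr = 0x00403000UL;

	if (pages_do_alias(vto, vaddr & PAGE_MASK))
		printf("different colours: writeback needed\n");
	else
		printf("same colour: user mapping hits the same lines\n");
	return 0;
}

When the check fires (or when the page is executable and the I-cache does not fill from the D-cache), copy_user_highpage() above writes the destination lines back with flush_data_cache_page() so the user mapping at vaddr cannot read stale data through its own cache colour.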