@@ -211,7 +211,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	void *vfrom, *vto;

 	vto = kmap_atomic(to, KM_USER1);
-	if (cpu_has_dc_aliases && !Page_dcache_dirty(from)) {
+	if (cpu_has_dc_aliases && page_mapped(from)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent();
@@ -234,12 +234,15 @@ void copy_to_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases) {
+	if (cpu_has_dc_aliases && page_mapped(page)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
 		kunmap_coherent();
-	} else
+	} else {
 		memcpy(dst, src, len);
+		if (cpu_has_dc_aliases)
+			SetPageDcacheDirty(page);
+	}
 	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
 		flush_cache_page(vma, vaddr, page_to_pfn(page));
 }
@@ -250,13 +253,15 @@ void copy_from_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases) {
-		void *vfrom =
-			kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+	if (cpu_has_dc_aliases && page_mapped(page)) {
+		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
 		kunmap_coherent();
-	} else
+	} else {
 		memcpy(dst, src, len);
+		if (cpu_has_dc_aliases)
+			SetPageDcacheDirty(page);
+	}
 }

 EXPORT_SYMBOL(copy_from_user_page);
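
For context (not part of the patch itself): the Page_dcache_dirty() /
SetPageDcacheDirty() helpers used above are, in the MIPS tree, thin
wrappers around the architecture-private PG_arch_1 page flag. A rough
sketch, along the lines of arch/mips/include/asm/cacheflush.h from the
same era; shown for illustration only:

	/*
	 * Per-page "dcache dirty" bookkeeping: PG_arch_1 is the page
	 * flag bit reserved for architecture-private use.
	 */
	#define PG_dcache_dirty			PG_arch_1

	#define Page_dcache_dirty(page)		\
		test_bit(PG_dcache_dirty, &(page)->flags)
	#define SetPageDcacheDirty(page)	\
		set_bit(PG_dcache_dirty, &(page)->flags)
	#define ClearPageDcacheDirty(page)	\
		clear_bit(PG_dcache_dirty, &(page)->flags)

The idea behind the new fallback paths: when the kernel writes to a page
that is not currently mapped in userspace, kmap_coherent() cannot pick a
matching cache colour, so the copy goes through the kernel mapping and the
page is merely flagged dirty. The fault path that later establishes the
user mapping (__update_cache() in arch/mips/mm/cache.c) can then test
Page_dcache_dirty() and flush the aliased lines before userspace sees the
page.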