@@ -211,7 +211,8 @@ void copy_user_highpage(struct page *to, struct page *from,
 	void *vfrom, *vto;
 
 	vto = kmap_atomic(to, KM_USER1);
-	if (cpu_has_dc_aliases && page_mapped(from)) {
+	if (cpu_has_dc_aliases &&
+	    page_mapped(from) && !Page_dcache_dirty(from)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent();
@@ -234,7 +235,8 @@ void copy_to_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases && page_mapped(page)) {
+	if (cpu_has_dc_aliases &&
+	    page_mapped(page) && !Page_dcache_dirty(page)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
 		kunmap_coherent();
@@ -253,7 +255,8 @@ void copy_from_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases && page_mapped(page)) {
+	if (cpu_has_dc_aliases &&
+	    page_mapped(page) && !Page_dcache_dirty(page)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
 		kunmap_coherent();
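
For context on the new test: kmap_coherent() installs a temporary kernel mapping at the same cache colour as the userspace mapping of the page, so it only sees current data while the kernel's own, differently coloured mapping holds no dirty lines. That is exactly what the added !Page_dcache_dirty() check guards; when it fails, each helper falls back to its ordinary kernel-mapping path outside these hunks. On MIPS the flag is the arch-private PG_arch_1 page flag; a rough sketch of the helpers as they appear in arch/mips/include/asm/cacheflush.h (paraphrased from memory, not necessarily the exact tree this patch applies to):

	#define PG_dcache_dirty			PG_arch_1

	/* kernel mapping of this page may hold dirty dcache lines */
	#define Page_dcache_dirty(page)		\
		test_bit(PG_dcache_dirty, &(page)->flags)
	#define SetPageDcacheDirty(page)	\
		set_bit(PG_dcache_dirty, &(page)->flags)
	#define ClearPageDcacheDirty(page)	\
		clear_bit(PG_dcache_dirty, &(page)->flags)

Code that dirties a page through a plain kernel mapping (for instance the fallback branch of copy_to_user_page()) is expected to set this flag, so later users know a flush is required before a coherent mapping of the page can be trusted.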