
sh: Provide __flush_anon_page().

This provides a __flush_anon_page() that handles both the aliasing and
non-aliasing cases. This fixes up some crashes with heavy
get_user_pages() users.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt committed 16 years ago
commit c0fe478dbb
2 files changed with 31 additions and 0 deletions
  1. +14 -0   arch/sh/include/asm/cacheflush.h
  2. +17 -0   arch/sh/mm/pg-mmu.c

+ 14 - 0
arch/sh/include/asm/cacheflush.h

@@ -1,6 +1,8 @@
 #ifndef __ASM_SH_CACHEFLUSH_H
 #define __ASM_SH_CACHEFLUSH_H
 
+#include <linux/mm.h>
+
 #ifdef __KERNEL__
 
 #ifdef CONFIG_CACHE_OFF
@@ -43,6 +45,18 @@ extern void __flush_purge_region(void *start, int size);
 extern void __flush_invalidate_region(void *start, int size);
 #endif
 
+#ifdef CONFIG_MMU
+#define ARCH_HAS_FLUSH_ANON_PAGE
+extern void __flush_anon_page(struct page *page, unsigned long);
+
+static inline void flush_anon_page(struct vm_area_struct *vma,
+				   struct page *page, unsigned long vmaddr)
+{
+	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
+		__flush_anon_page(page, vmaddr);
+}
+#endif
+
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
 {

+ 17 - 0
arch/sh/mm/pg-mmu.c

@@ -157,3 +157,20 @@ void __update_cache(struct vm_area_struct *vma,
 		}
 	}
 }
+
+void __flush_anon_page(struct page *page, unsigned long vmaddr)
+{
+	unsigned long addr = (unsigned long) page_address(page);
+
+	if (pages_do_alias(addr, vmaddr)) {
+		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+		    !test_bit(PG_dcache_dirty, &page->flags)) {
+			void *kaddr;
+
+			kaddr = kmap_coherent(page, vmaddr);
+			__flush_wback_region((void *)kaddr, PAGE_SIZE);
+			kunmap_coherent();
+		} else
+			__flush_wback_region((void *)addr, PAGE_SIZE);
+	}
+}
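
For context, a minimal sketch of the caller side, assuming the get_user_pages() behaviour of kernels from this era (mm/memory.c): each page handed back to the caller is flushed before it is touched through the kernel mapping, which is where the new hook takes effect on aliasing sh D-caches. The wrapper name gup_touch_page() is hypothetical; only the two flush calls reflect the generic code.

/*
 * Hypothetical wrapper illustrating the flush sequence used by the
 * generic get_user_pages() path of this era; the real calls live
 * inline in mm/memory.c.
 */
#include <linux/mm.h>
#include <linux/highmem.h>

static void gup_touch_page(struct vm_area_struct *vma, struct page *page,
			   unsigned long start)
{
	/*
	 * Write back any dirty user-space alias of an anonymous page
	 * before the kernel reads it through its own mapping.  With this
	 * commit, aliasing sh D-caches reach __flush_anon_page() here;
	 * a non-aliasing cache skips the flush at run time.
	 */
	flush_anon_page(vma, page, start);
	flush_dcache_page(page);
}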