@@ -144,7 +144,14 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * page. This ensures that data in the physical page is mutually
 	 * coherent with the kernels mapping.
 	 */
-	__cpuc_flush_dcache_page(page_address(page));
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * kmap_atomic() doesn't set the page virtual address, and
+	 * kunmap_atomic() takes care of cache flushing already.
+	 */
+	if (page_address(page))
+#endif
+		__cpuc_flush_dcache_page(page_address(page));
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,