@@ -300,6 +300,39 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
+/*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation. Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+		struct address_space *mapping;
+
+		mapping = page_mapping(page);
+
+		if (!mapping || mapping_mapped(mapping)) {
+			void *addr;
+
+			addr = page_address(page);
+			/*
+			 * kmap_atomic() doesn't set the page virtual
+			 * address for highmem pages, and
+			 * kunmap_atomic() takes care of cache
+			 * flushing already.
+			 */
+			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+		}
+	}
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
 /*
  * Flush an anonymous page so that users of get_user_pages()
  * can safely access the data. The expected sequence is:
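
For context, the caller pattern this function assumes (per Documentation/cachetlb.txt) is: pin the page with kmap(), modify it through the kernel mapping, flush, then drop the mapping. The sketch below is illustrative only and not part of the patch; copy_buf_to_page() is a hypothetical helper:

#include <linux/kernel.h>	/* min_t() */
#include <linux/highmem.h>	/* kmap(), kunmap(), flush_kernel_dcache_page() */
#include <linux/string.h>	/* memcpy() */

/*
 * Hypothetical helper: write data into a page that user space may
 * also have mapped, then make the kernel-side writes coherent by
 * flushing the dirty cache lines before the mapping is dropped.
 */
static void copy_buf_to_page(struct page *page, const void *buf, size_t len)
{
	void *addr = kmap(page);		/* pins the page mapping */

	memcpy(addr, buf, min_t(size_t, len, PAGE_SIZE));
	flush_kernel_dcache_page(page);		/* write back before kunmap */
	kunmap(page);
}

With this patch, the flush above is a no-op on non-aliasing caches, and on VIVT/aliasing-VIPT caches it now also covers the case where the page is mapped into user space, not just the page-cache-only case.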