@@ -386,6 +386,76 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(filemap_write_and_wait_range);
 
+/**
+ * replace_page_cache_page - replace a pagecache page with a new one
+ * @old: page to be replaced
+ * @new: page to replace with
+ * @gfp_mask: allocation mode
+ *
+ * This function replaces a page in the pagecache with a new one. On
+ * success it acquires the pagecache reference for the new page and
+ * drops it for the old page. Both the old and new pages must be
+ * locked. This function does not add the new page to the LRU; the
+ * caller must do that.
+ *
+ * The remove + add is atomic. The only way this function can fail is
+ * memory allocation failure.
+ */
+int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+{
+	int error;
+	struct mem_cgroup *memcg = NULL;
+
+	VM_BUG_ON(!PageLocked(old));
+	VM_BUG_ON(!PageLocked(new));
+	VM_BUG_ON(new->mapping);
+
+	/*
+	 * This is not page migration, but prepare_migration and
+	 * end_migration do enough work for charge replacement.
+	 *
+	 * In the longer term we probably want a specialized function
+	 * for moving the charge from old to new in a more efficient
+	 * manner.
+	 */
+	error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
+	if (error)
+		return error;
+
+	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+	if (!error) {
+		struct address_space *mapping = old->mapping;
+		void (*freepage)(struct page *);
+		pgoff_t offset = old->index;
+
+		freepage = mapping->a_ops->freepage;
+
+		page_cache_get(new);
+		new->mapping = mapping;
+		new->index = offset;
+
+		spin_lock_irq(&mapping->tree_lock);
+		__remove_from_page_cache(old);
+		error = radix_tree_insert(&mapping->page_tree, offset, new);
+		BUG_ON(error);
+		mapping->nrpages++;
+		__inc_zone_page_state(new, NR_FILE_PAGES);
+		if (PageSwapBacked(new))
+			__inc_zone_page_state(new, NR_SHMEM);
+		spin_unlock_irq(&mapping->tree_lock);
+		radix_tree_preload_end();
+		if (freepage)
+			freepage(old);
+		page_cache_release(old);
+		mem_cgroup_end_migration(memcg, old, new, true);
+	} else {
+		mem_cgroup_end_migration(memcg, old, new, false);
+	}
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(replace_page_cache_page);
+
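For illustration, here is a minimal sketch of how a caller might use the new helper, loosely modeled on the fuse splice-write path that motivated it: lock both pages, replace, then put the new page on the LRU. Leaving LRU insertion to the caller keeps the atomic swap usable for pages the caller has not finished setting up. This block is not part of the patch; the function name replace_with_new_page and the abbreviated error handling are assumptions for the example.

#include <linux/pagemap.h>
#include <linux/swap.h>

/* Hypothetical caller: swap @newpage into the pagecache slot of @oldpage. */
static int replace_with_new_page(struct page *oldpage, struct page *newpage)
{
	int err;

	lock_page(oldpage);
	lock_page(newpage);

	/* Atomic remove + add; both pages must be locked across the call. */
	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (!err) {
		/* The helper leaves LRU handling to us. */
		lru_cache_add_file(newpage);
	}

	unlock_page(newpage);
	unlock_page(oldpage);
	return err;
}
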
 /**
  * add_to_page_cache_locked - add a locked page to the pagecache
  * @page: page to add