@@ -683,10 +683,21 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 		mutex_lock(&shmem_swaplist_mutex);
 		/*
 		 * We needed to drop mutex to make that restrictive page
-		 * allocation; but the inode might already be freed by now,
-		 * and we cannot refer to inode or mapping or info to check.
-		 * However, we do hold page lock on the PageSwapCache page,
-		 * so can check if that still has our reference remaining.
+		 * allocation, but the inode might have been freed while we
+		 * dropped it: although a racing shmem_evict_inode() cannot
+		 * complete without emptying the radix_tree, our page lock
+		 * on this swapcache page is not enough to prevent that -
+		 * free_swap_and_cache() of our swap entry will only
+		 * trylock_page(), removing swap from radix_tree whatever.
+		 *
+		 * We must not proceed to shmem_add_to_page_cache() if the
+		 * inode has been freed, but of course we cannot rely on
+		 * inode or mapping or info to check that. However, we can
+		 * safely check if our swap entry is still in use (and here
+		 * it can't have got reused for another page): if it's still
+		 * in use, then the inode cannot have been freed yet, and we
+		 * can safely proceed (if it's no longer in use, that tells
+		 * nothing about the inode, but we don't need to unuse swap).
 		 */
 		if (!page_swapcount(*pagep))
 			error = -ENOENT;
@@ -730,9 +741,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
 
 	/*
 	 * There's a faint possibility that swap page was replaced before
-	 * caller locked it: it will come back later with the right page.
+	 * caller locked it: caller will come back later with the right page.
 	 */
-	if (unlikely(!PageSwapCache(page)))
+	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
 		goto out;
 
 	/*
@@ -995,21 +1006,15 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	newpage = shmem_alloc_page(gfp, info, index);
 	if (!newpage)
 		return -ENOMEM;
-	VM_BUG_ON(shmem_should_replace_page(newpage, gfp));
 
-	*pagep = newpage;
 	page_cache_get(newpage);
 	copy_highpage(newpage, oldpage);
+	flush_dcache_page(newpage);
 
-	VM_BUG_ON(!PageLocked(oldpage));
 	__set_page_locked(newpage);
-	VM_BUG_ON(!PageUptodate(oldpage));
 	SetPageUptodate(newpage);
-	VM_BUG_ON(!PageSwapBacked(oldpage));
 	SetPageSwapBacked(newpage);
-	VM_BUG_ON(!swap_index);
 	set_page_private(newpage, swap_index);
-	VM_BUG_ON(!PageSwapCache(oldpage));
 	SetPageSwapCache(newpage);
 
 	/*
@@ -1019,13 +1024,24 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	spin_lock_irq(&swap_mapping->tree_lock);
 	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
 								   newpage);
-	__inc_zone_page_state(newpage, NR_FILE_PAGES);
-	__dec_zone_page_state(oldpage, NR_FILE_PAGES);
+	if (!error) {
+		__inc_zone_page_state(newpage, NR_FILE_PAGES);
+		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
+	}
 	spin_unlock_irq(&swap_mapping->tree_lock);
-	BUG_ON(error);
 
-	mem_cgroup_replace_page_cache(oldpage, newpage);
-	lru_cache_add_anon(newpage);
+	if (unlikely(error)) {
+		/*
+		 * Is this possible? I think not, now that our callers check
+		 * both PageSwapCache and page_private after getting page lock;
+		 * but be defensive. Reverse old to newpage for clear and free.
+		 */
+		oldpage = newpage;
+	} else {
+		mem_cgroup_replace_page_cache(oldpage, newpage);
+		lru_cache_add_anon(newpage);
+		*pagep = newpage;
+	}
 
 	ClearPageSwapCache(oldpage);
 	set_page_private(oldpage, 0);
@@ -1033,7 +1049,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	unlock_page(oldpage);
 	page_cache_release(oldpage);
 	page_cache_release(oldpage);
-	return 0;
+	return error;
 }
 
 /*
@@ -1107,7 +1123,8 @@ repeat:
 
 		/* We have to do this with page locked to prevent races */
 		lock_page(page);
-		if (!PageSwapCache(page) || page->mapping) {
+		if (!PageSwapCache(page) || page_private(page) != swap.val ||
+		    page->mapping) {
 			error = -EEXIST;	/* try again */
 			goto failed;
 		}