@@ -1145,8 +1145,20 @@ repeat:
 		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
 						gfp, swp_to_radix_entry(swap));
-			/* We already confirmed swap, and make no allocation */
-			VM_BUG_ON(error);
+			/*
+			 * We already confirmed swap under page lock, and make
+			 * no memory allocation here, so usually no possibility
+			 * of error; but free_swap_and_cache() only trylocks a
+			 * page, so it is just possible that the entry has been
+			 * truncated or holepunched since swap was confirmed.
+			 * shmem_undo_range() will have done some of the
+			 * unaccounting, now delete_from_swap_cache() will do
+			 * the rest (including mem_cgroup_uncharge_swapcache).
+			 * Reset swap.val? No, leave it so "failed" goes back to
+			 * "repeat": reading a hole and writing should succeed.
+			 */
+			if (error)
+				delete_from_swap_cache(page);
 		}
 		if (error)
 			goto failed;