@@ -336,8 +336,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * Swap entry may have been freed since our caller observed it.
 		 */
 		err = swapcache_prepare(entry);
-		if (err == -EEXIST) {	/* seems racy */
+		if (err == -EEXIST) {
 			radix_tree_preload_end();
+			/*
+			 * We might race against get_swap_page() and stumble
+			 * across a SWAP_HAS_CACHE swap_map entry whose page
+			 * has not been brought into the swapcache yet, while
+			 * the other end is scheduled away waiting on discard
+			 * I/O completion at scan_swap_map().
+			 *
+			 * In order to avoid turning this transitory state
+			 * into a permanent loop around this -EEXIST case
+			 * if !CONFIG_PREEMPT and the I/O completion happens
+			 * to be waiting on the CPU waitqueue where we are now
+			 * busy looping, we just conditionally invoke the
+			 * scheduler here, if there are some more important
+			 * tasks to run.
+			 */
+			cond_resched();
 			continue;
 		}
 		if (err) {			/* swp entry is obsolete ? */
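
For context, the hunk above sits inside the page-allocation retry loop of
read_swap_cache_async(). The fragment below is a simplified, illustrative
reduction of that loop, not the function's actual body: page allocation,
swapcache insertion and most error handling are omitted, and the loop shape
is an assumption used only to show why yielding on the transient -EEXIST
matters.

	/* Illustrative sketch only -- see mm/swap_state.c for the real loop. */
	for (;;) {
		err = swapcache_prepare(entry);
		if (!err)
			break;			/* we now own SWAP_HAS_CACHE */

		if (err == -EEXIST) {
			/*
			 * Another task set SWAP_HAS_CACHE but has not added
			 * its page to the swapcache yet (it may be blocked on
			 * discard I/O in scan_swap_map()). Without the
			 * cond_resched() added by this patch, a !CONFIG_PREEMPT
			 * kernel could spin here indefinitely on the very CPU
			 * that task needs in order to make progress.
			 */
			cond_resched();
			continue;
		}

		/* Any other error: the swap entry was freed, give up. */
		return NULL;
	}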