@@ -79,6 +79,32 @@ static inline unsigned short encode_swapmap(int count, bool has_cache)
 	return ret;
 }
 
+/* returns 1 if swap entry is freed */
+static int
+__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
+{
+	int type = si - swap_info;
+	swp_entry_t entry = swp_entry(type, offset);
+	struct page *page;
+	int ret = 0;
+
+	page = find_get_page(&swapper_space, entry.val);
+	if (!page)
+		return 0;
+	/*
+	 * This function is called from scan_swap_map(), which in turn can be
+	 * called by vmscan.c while it is reclaiming pages, so a page lock may
+	 * already be held here.  We have to use trylock to avoid deadlock.
+	 * This is a special case; in normal operation, use try_to_free_swap()
+	 * with an explicit lock_page().
+	 */
+	if (trylock_page(page)) {
+		ret = try_to_free_swap(page);
+		unlock_page(page);
+	}
+	page_cache_release(page);
+	return ret;
+}
+
 /*
  * We need this because the bdev->unplug_fn can sleep and we cannot
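The comment in __try_to_reclaim_swap() above points at the normal-path
alternative: take the page lock explicitly and call try_to_free_swap()
directly. A minimal sketch of that usual pattern, for illustration only (the
helper name reclaim_swap_entry_slow is made up here, and the snippet assumes
the same mm/swapfile.c context as the code above):

	/* Sketch, not part of the patch: the "usual" way to drop a swap
	 * entry's cache page when no spinlock such as swap_lock is held.
	 * Returns 1 if the swap entry was freed. */
	static int reclaim_swap_entry_slow(swp_entry_t entry)
	{
		struct page *page = find_get_page(&swapper_space, entry.val);
		int ret = 0;

		if (!page)
			return 0;
		lock_page(page);	/* may sleep; safe without swap_lock */
		ret = try_to_free_swap(page);
		unlock_page(page);
		page_cache_release(page);
		return ret;
	}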
@@ -301,6 +327,19 @@ checks:
 		goto no_page;
 	if (offset > si->highest_bit)
 		scan_base = offset = si->lowest_bit;
+
+	/* reuse swap entry of cache-only swap if not busy. */
+	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+		int swap_was_freed;
+		spin_unlock(&swap_lock);
+		swap_was_freed = __try_to_reclaim_swap(si, offset);
+		spin_lock(&swap_lock);
+		/* entry was freed successfully, try to use this again */
+		if (swap_was_freed)
+			goto checks;
+		goto scan; /* check next one */
+	}
+
 	if (si->swap_map[offset])
 		goto scan;
 
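Two details of the hunk above are worth noting (reader's notes, not part of
the patch). First, try_to_free_swap() needs swap_lock itself (its
page_swapcount() check and the swap-cache teardown both take it), so
swap_lock has to be dropped around __try_to_reclaim_swap(); because the slot
can change while the lock is dropped, the code jumps back to the checks:
label to re-validate it rather than using the offset blindly. Second, a
swap_map value of exactly SWAP_HAS_CACHE means the entry's use count is zero
and only the swap cache still pins it, which is what makes it a safe
candidate for reuse; vm_swap_full() keeps the extra work to the case where
swap space is actually tight. A minimal sketch of that second test as a
helper (hypothetical name, assuming the SWAP_HAS_CACHE flag introduced
earlier in this series):

	/* Hypothetical helper, not in the patch: with the encoding produced
	 * by encode_swapmap(), a swap_map entry equal to SWAP_HAS_CACHE
	 * carries the cache flag and a zero use count, i.e. only the swap
	 * cache holds a reference to the entry. */
	static inline int swap_slot_is_cache_only(unsigned short ent)
	{
		return ent == SWAP_HAS_CACHE;
	}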
@@ -382,6 +421,10 @@ scan:
 			spin_lock(&swap_lock);
 			goto checks;
 		}
+		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+			spin_lock(&swap_lock);
+			goto checks;
+		}
 		if (unlikely(--latency_ration < 0)) {
 			cond_resched();
 			latency_ration = LATENCY_LIMIT;
@@ -393,6 +436,10 @@ scan:
 			spin_lock(&swap_lock);
 			goto checks;
 		}
+		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+			spin_lock(&swap_lock);
+			goto checks;
+		}
 		if (unlikely(--latency_ration < 0)) {
 			cond_resched();
 			latency_ration = LATENCY_LIMIT;
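These last two hunks add the same test to both scanning loops of
scan_swap_map(), which run with swap_lock dropped: when a cache-only slot is
spotted there, the lock is re-taken and control jumps back to checks:, where
the reclaim added in the checks: hunk earlier in this patch runs under the
lock. A condensed outline of the resulting flow (a reader's summary in
comment form, not text from the patch):

	/*
	 * scan_swap_map() after this patch, in outline:
	 *
	 * checks:	(swap_lock held)
	 *	- cache-only slot and vm_swap_full():
	 *		drop swap_lock, __try_to_reclaim_swap(si, offset),
	 *		retake swap_lock;
	 *		freed  -> goto checks (slot may now be usable)
	 *		!freed -> goto scan   (try the next slot)
	 *	- busy slot -> goto scan
	 *	- otherwise claim the slot as before
	 *
	 * scan:	(swap_lock dropped)
	 *	- walk the remaining slots; on a free slot, or on a cache-only
	 *	  slot when vm_swap_full(), retake swap_lock and goto checks
	 */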