@@ -1708,14 +1708,13 @@ static bool in_reclaim_compaction(struct scan_control *sc)
  * calls try_to_compact_zone() that it will have enough free pages to succeed.
  * It will give up earlier than that if there is difficulty reclaiming pages.
  */
-static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
+static inline bool should_continue_reclaim(struct lruvec *lruvec,
 					unsigned long nr_reclaimed,
 					unsigned long nr_scanned,
 					struct scan_control *sc)
 {
 	unsigned long pages_for_compaction;
 	unsigned long inactive_lru_pages;
-	struct lruvec *lruvec;
 
 	/* If not in reclaim/compaction mode, stop */
 	if (!in_reclaim_compaction(sc))
@@ -1748,7 +1747,6 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 	 * If we have not reclaimed enough pages for compaction and the
 	 * inactive lists are large enough, continue reclaiming
 	 */
-	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
 	pages_for_compaction = (2UL << sc->order);
 	inactive_lru_pages = get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
 	if (nr_swap_pages > 0)
@@ -1759,7 +1757,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 		return true;
 
 	/* If compaction would go ahead or the allocation would succeed, stop */
-	switch (compaction_suitable(mz->zone, sc->order)) {
+	switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
 	case COMPACT_PARTIAL:
 	case COMPACT_CONTINUE:
 		return false;
@@ -1826,7 +1824,7 @@ restart:
 						sc, LRU_ACTIVE_ANON);
 
 	/* reclaim/compaction might need reclaim to continue */
-	if (should_continue_reclaim(mz, nr_reclaimed,
+	if (should_continue_reclaim(lruvec, nr_reclaimed,
 				    sc->nr_scanned - nr_scanned, sc))
 		goto restart;
 
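Net effect of the hunks above: should_continue_reclaim() now takes the lruvec directly instead of a mem_cgroup_zone and no longer resolves it internally; where it still needs the zone (for compaction_suitable()), it derives it with lruvec_zone(). A minimal sketch of the resulting call pattern, assuming a caller that still holds the mem_cgroup_zone pointer (mz) the old signature used:

	/*
	 * Sketch only, not part of the patch: the caller resolves the
	 * lruvec once, using the same helper the patch removed from the
	 * callee, and passes it down the reclaim path.
	 */
	struct lruvec *lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);

	if (should_continue_reclaim(lruvec, nr_reclaimed,
				    sc->nr_scanned - nr_scanned, sc))
		goto restart;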