@@ -2570,7 +2570,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
 static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
							int *classzone_idx)
 {
-	int all_zones_ok;
+	struct zone *unbalanced_zone;
 	unsigned long balanced;
 	int i;
 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
@@ -2604,7 +2604,7 @@ loop_again:
 		unsigned long lru_pages = 0;
 		int has_under_min_watermark_zone = 0;

-		all_zones_ok = 1;
+		unbalanced_zone = NULL;
 		balanced = 0;

 		/*
@@ -2743,7 +2743,7 @@ loop_again:
 			}

 			if (!zone_balanced(zone, testorder, 0, end_zone)) {
-				all_zones_ok = 0;
+				unbalanced_zone = zone;
 				/*
 				 * We are still under min water mark. This
 				 * means that we have a GFP_ATOMIC allocation
@@ -2776,7 +2776,7 @@ loop_again:
				pfmemalloc_watermark_ok(pgdat))
 			wake_up(&pgdat->pfmemalloc_wait);

-		if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
+		if (!unbalanced_zone || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
 			break;		/* kswapd: all done */
 		/*
 		 * OK, kswapd is getting into trouble. Take a nap, then take
@@ -2786,7 +2786,7 @@ loop_again:
 			if (has_under_min_watermark_zone)
 				count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
 			else
-				congestion_wait(BLK_RW_ASYNC, HZ/10);
+				wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10);
 		}

 		/*
@@ -2805,7 +2805,7 @@ out:
 	 * high-order: Balanced zones must make up at least 25% of the node
 	 *		for the node to be balanced
 	 */
-	if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
+	if (unbalanced_zone && (!order || !pgdat_balanced(pgdat, balanced, *classzone_idx))) {
 		cond_resched();

 		try_to_freeze();
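The refactor above is mechanical: the int flag all_zones_ok becomes struct zone *unbalanced_zone, cleared at the top of each balancing pass, set to the offending zone when zone_balanced() fails, and read as !unbalanced_zone where the flag used to be tested. The payoff is the back-off path: wait_iff_congested() can consult the state of that specific zone before sleeping, where congestion_wait() slept unconditionally. The rewritten exit test in the last hunk is the same condition after De Morgan: !(all_zones_ok || (order && balanced)) is equivalent to !all_zones_ok && (!order || !balanced), and substituting all_zones_ok == !unbalanced_zone yields unbalanced_zone && (!order || !pgdat_balanced(...)). As a standalone illustration (not kernel code; every name in it, such as struct item and wait_if_busy, is hypothetical), a minimal user-space sketch of the same flag-to-pointer pattern:

/*
 * Illustration only -- not part of the patch above. A boolean "all ok"
 * flag becomes a pointer to the last item that failed its check, so the
 * back-off path can act on that item instead of sleeping unconditionally.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

struct item {
	const char *name;
	bool balanced;		/* stands in for zone_balanced() */
	bool busy;		/* stands in for per-zone congestion */
};

/* stands in for wait_iff_congested(): back off only if *this* item is busy */
static void wait_if_busy(const struct item *it)
{
	if (it->busy)
		printf("backing off on %s\n", it->name);
	else
		printf("%s not busy, skipping the wait\n", it->name);
}

int main(void)
{
	struct item items[] = {
		{ "DMA",    true,  false },
		{ "Normal", false, true  },
		{ "High",   true,  false },
	};
	struct item *unbalanced = NULL;		/* was: int all_ok = 1; */

	for (size_t i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
		if (!items[i].balanced)
			unbalanced = &items[i];	/* was: all_ok = 0; */
	}

	if (!unbalanced) {			/* was: if (all_ok) */
		printf("all items balanced\n");
		return 0;
	}
	wait_if_busy(unbalanced);		/* targeted back-off */
	return 0;
}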