
vmscan: avoid subtraction of unsigned types

'slab_reclaimable' and 'nr_pages' are unsigned.  Subtraction is unsafe:
when nr_pages is larger than slab_reclaimable, the result wraps around to
a huge positive value instead of going negative, so comparisons against it
give the wrong answer.
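
As a minimal sketch of the wraparound (ordinary userspace C with hypothetical
values and names such as current_slab, not the kernel code itself):

	/*
	 * Minimal sketch of the unsigned wraparound the patch avoids;
	 * values are hypothetical, this is not the kernel code itself.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long slab_reclaimable = 10;   /* snapshot before shrinking */
		unsigned long nr_pages = 32;           /* reclaim target */
		unsigned long current_slab = 40;       /* counter read in the loop */

		/*
		 * Old condition: 10 - 32 wraps to a value near ULONG_MAX, so the
		 * test is effectively always false and slab shrinking stops early.
		 */
		if (current_slab > slab_reclaimable - nr_pages)
			printf("old test: keep shrinking\n");
		else
			printf("old test: stop early (wraparound)\n");

		/* Patched condition: move nr_pages to the other side, no subtraction. */
		if (current_slab + nr_pages > slab_reclaimable)
			printf("new test: keep shrinking\n");

		return 0;
	}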

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
KOSAKI Motohiro, 15 years ago
commit 1574804899
1 changed file with 8 additions and 7 deletions

+ 8 - 7
mm/vmscan.c

@@ -2600,7 +2600,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.swappiness = vm_swappiness,
 		.order = order,
 	};
-	unsigned long slab_reclaimable;
+	unsigned long nr_slab_pages0, nr_slab_pages1;
 
 	cond_resched();
 	/*
@@ -2625,8 +2625,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
 	}
 
-	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
-	if (slab_reclaimable > zone->min_slab_pages) {
+	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+	if (nr_slab_pages0 > zone->min_slab_pages) {
 		/*
 		 * shrink_slab() does not currently allow us to determine how
 		 * many pages were freed in this zone. So we take the current
@@ -2638,16 +2638,17 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		 * take a long time.
 		 */
 		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
-			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
-				slab_reclaimable - nr_pages)
+		       (zone_page_state(zone, NR_SLAB_RECLAIMABLE) + nr_pages >
+				nr_slab_pages0))
 			;
 
 		/*
 		 * Update nr_reclaimed by the number of slab pages we
 		 * reclaimed from this zone.
 		 */
-		sc.nr_reclaimed += slab_reclaimable -
-			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+		nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+		if (nr_slab_pages1 < nr_slab_pages0)
+			sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
 	}
 
 	p->reclaim_state = NULL;
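
The last hunk applies the same idea to the nr_reclaimed accounting: snapshot
the counter before calling shrink_slab() and only subtract when it actually
dropped. A standalone sketch of that guard, again with hypothetical values
and outside the kernel:

	/*
	 * Sketch of the guarded subtraction from the patch; hypothetical
	 * values, plain userspace C rather than the kernel code itself.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long nr_slab_pages0 = 100;  /* counter before shrink_slab() */
		unsigned long nr_slab_pages1 = 120;  /* counter after; it may have grown */
		unsigned long nr_reclaimed = 0;

		/* Only credit reclaimed pages when the counter actually dropped. */
		if (nr_slab_pages1 < nr_slab_pages0)
			nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;

		/* Without the check, 100 - 120 would wrap to a huge bogus credit. */
		printf("nr_reclaimed = %lu\n", nr_reclaimed);
		return 0;
	}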