|
@@ -1150,10 +1150,15 @@ failed:
|
|
|
return NULL;
|
|
|
}
|
|
|
|
|
|
-#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
|
|
|
-#define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */
|
|
|
-#define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */
|
|
|
-#define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */
|
|
|
+/* The ALLOC_WMARK bits are used as an index into zone->watermark */
|
|
|
+#define ALLOC_WMARK_MIN WMARK_MIN
|
|
|
+#define ALLOC_WMARK_LOW WMARK_LOW
|
|
|
+#define ALLOC_WMARK_HIGH WMARK_HIGH
|
|
|
+#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */
|
|
|
+
|
|
|
+/* Mask to get the watermark bits */
|
|
|
+#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
|
|
|
+
|
|
|
#define ALLOC_HARDER 0x10 /* try to alloc harder */
|
|
|
#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
|
|
|
#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
|
|
@@ -1440,14 +1445,10 @@ zonelist_scan:
|
|
|
!cpuset_zone_allowed_softwall(zone, gfp_mask))
|
|
|
goto try_next_zone;
|
|
|
|
|
|
+ BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
|
|
|
if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
|
|
|
unsigned long mark;
|
|
|
- if (alloc_flags & ALLOC_WMARK_MIN)
|
|
|
- mark = zone->pages_min;
|
|
|
- else if (alloc_flags & ALLOC_WMARK_LOW)
|
|
|
- mark = zone->pages_low;
|
|
|
- else
|
|
|
- mark = zone->pages_high;
|
|
|
+ mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
|
|
|
if (!zone_watermark_ok(zone, order, mark,
|
|
|
classzone_idx, alloc_flags)) {
|
|
|
if (!zone_reclaim_mode ||
|
|
@@ -1959,7 +1960,7 @@ static unsigned int nr_free_zone_pages(int offset)
|
|
|
|
|
|
for_each_zone_zonelist(zone, z, zonelist, offset) {
|
|
|
unsigned long size = zone->present_pages;
|
|
|
- unsigned long high = zone->pages_high;
|
|
|
+ unsigned long high = high_wmark_pages(zone);
|
|
|
if (size > high)
|
|
|
sum += size - high;
|
|
|
}
|
|
@@ -2096,9 +2097,9 @@ void show_free_areas(void)
|
|
|
"\n",
|
|
|
zone->name,
|
|
|
K(zone_page_state(zone, NR_FREE_PAGES)),
|
|
|
- K(zone->pages_min),
|
|
|
- K(zone->pages_low),
|
|
|
- K(zone->pages_high),
|
|
|
+ K(min_wmark_pages(zone)),
|
|
|
+ K(low_wmark_pages(zone)),
|
|
|
+ K(high_wmark_pages(zone)),
|
|
|
K(zone_page_state(zone, NR_ACTIVE_ANON)),
|
|
|
K(zone_page_state(zone, NR_INACTIVE_ANON)),
|
|
|
K(zone_page_state(zone, NR_ACTIVE_FILE)),
|
|
@@ -2702,8 +2703,8 @@ static inline unsigned long wait_table_bits(unsigned long size)
|
|
|
|
|
|
/*
|
|
|
* Mark a number of pageblocks as MIGRATE_RESERVE. The number
|
|
|
- * of blocks reserved is based on zone->pages_min. The memory within the
|
|
|
- * reserve will tend to store contiguous free pages. Setting min_free_kbytes
|
|
|
+ * of blocks reserved is based on min_wmark_pages(zone). The memory within
|
|
|
+ * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
|
|
|
* higher will lead to a bigger reserve which will get freed as contiguous
|
|
|
* blocks as reclaim kicks in
|
|
|
*/
|
|
@@ -2716,7 +2717,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
|
|
|
/* Get the start pfn, end pfn and the number of blocks to reserve */
|
|
|
start_pfn = zone->zone_start_pfn;
|
|
|
end_pfn = start_pfn + zone->spanned_pages;
|
|
|
- reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
|
|
|
+ reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
|
|
|
pageblock_order;
|
|
|
|
|
|
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
|
|
@@ -4319,8 +4320,8 @@ static void calculate_totalreserve_pages(void)
|
|
|
max = zone->lowmem_reserve[j];
|
|
|
}
|
|
|
|
|
|
- /* we treat pages_high as reserved pages. */
|
|
|
- max += zone->pages_high;
|
|
|
+ /* we treat the high watermark as reserved pages. */
|
|
|
+ max += high_wmark_pages(zone);
|
|
|
|
|
|
if (max > zone->present_pages)
|
|
|
max = zone->present_pages;
|
|
@@ -4400,7 +4401,7 @@ void setup_per_zone_pages_min(void)
|
|
|
* need highmem pages, so cap pages_min to a small
|
|
|
* value here.
|
|
|
*
|
|
|
- * The (pages_high-pages_low) and (pages_low-pages_min)
|
|
|
+ * The (WMARK_HIGH-WMARK_LOW) and (WMARK_LOW-WMARK_MIN)
|
|
|
* deltas controls asynch page reclaim, and so should
|
|
|
* not be capped for highmem.
|
|
|
*/
|
|
@@ -4411,17 +4412,17 @@ void setup_per_zone_pages_min(void)
|
|
|
min_pages = SWAP_CLUSTER_MAX;
|
|
|
if (min_pages > 128)
|
|
|
min_pages = 128;
|
|
|
- zone->pages_min = min_pages;
|
|
|
+ zone->watermark[WMARK_MIN] = min_pages;
|
|
|
} else {
|
|
|
/*
|
|
|
* If it's a lowmem zone, reserve a number of pages
|
|
|
* proportionate to the zone's size.
|
|
|
*/
|
|
|
- zone->pages_min = tmp;
|
|
|
+ zone->watermark[WMARK_MIN] = tmp;
|
|
|
}
|
|
|
|
|
|
- zone->pages_low = zone->pages_min + (tmp >> 2);
|
|
|
- zone->pages_high = zone->pages_min + (tmp >> 1);
|
|
|
+ zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
|
|
|
+ zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
|
|
|
setup_zone_migrate_reserve(zone);
|
|
|
spin_unlock_irqrestore(&zone->lock, flags);
|
|
|
}
|
|
@@ -4566,7 +4567,7 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
|
|
|
* whenever sysctl_lowmem_reserve_ratio changes.
|
|
|
*
|
|
|
* The reserve ratio obviously has absolutely no relation with the
|
|
|
- * pages_min watermarks. The lowmem reserve ratio can only make sense
|
|
|
+ * minimum watermarks. The lowmem reserve ratio can only make sense
|
|
|
* if in function of the boot time zone sizes.
|
|
|
*/
|
|
|
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
|