@@ -5218,10 +5218,6 @@ static void __setup_per_zone_wmarks(void)
 		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
-		zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
-		zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
-		zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
-
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
@@ -5766,54 +5762,6 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 	return ret > 0 ? 0 : ret;
 }
 
-/*
- * Update zone's cma pages counter used for watermark level calculation.
- */
-static inline void __update_cma_watermarks(struct zone *zone, int count)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&zone->lock, flags);
-	zone->min_cma_pages += count;
-	spin_unlock_irqrestore(&zone->lock, flags);
-	setup_per_zone_wmarks();
-}
-
-/*
- * Trigger memory pressure bump to reclaim some pages in order to be able to
- * allocate 'count' pages in single page units. Does similar work as
- *__alloc_pages_slowpath() function.
- */
-static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
-{
-	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
-	struct zonelist *zonelist = node_zonelist(0, gfp_mask);
-	int did_some_progress = 0;
-	int order = 1;
-
-	/*
-	 * Increase level of watermarks to force kswapd do his job
-	 * to stabilise at new watermark level.
-	 */
-	__update_cma_watermarks(zone, count);
-
-	/* Obey watermarks as if the page was being allocated */
-	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
-		wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
-
-		did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
-						      NULL);
-		if (!did_some_progress) {
-			/* Exhausted what can be done so it's blamo time */
-			out_of_memory(zonelist, gfp_mask, order, NULL, false);
-		}
-	}
-
-	/* Restore original watermark levels. */
-	__update_cma_watermarks(zone, -count);
-
-	return count;
-}
-
 /**
  * alloc_contig_range() -- tries to allocate given range of pages
  * @start:	start PFN to allocate
@@ -5837,7 +5785,6 @@ static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
 int alloc_contig_range(unsigned long start, unsigned long end,
 		       unsigned migratetype)
 {
-	struct zone *zone = page_zone(pfn_to_page(start));
 	unsigned long outer_start, outer_end;
 	int ret = 0, order;
 
@@ -5922,11 +5869,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		goto done;
 	}
 
-	/*
-	 * Reclaim enough pages to make sure that contiguous allocation
-	 * will not starve the system.
-	 */
-	__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
 
 	/* Grab isolated pages from freelists. */
 	outer_end = isolate_freepages_range(&cc, outer_start, end);
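
For readers following the arithmetic in the first hunk, the watermark computation that survives the patch can be modelled in isolation: WMARK_LOW and WMARK_HIGH are derived from the zone's min watermark plus a quarter and a half of tmp, the zone's proportional share of min_free_kbytes expressed in pages. The sketch below is a minimal standalone illustration under that reading, not kernel code; struct zone_model and setup_wmarks() are hypothetical stand-ins for the real struct zone fields and __setup_per_zone_wmarks().

	#include <stdio.h>

	/*
	 * Standalone model of the per-zone watermark calculation kept by
	 * this patch (illustrative only; names here are hypothetical).
	 * 'tmp' plays the role of the zone's share of min_free_kbytes,
	 * in pages, as computed earlier in __setup_per_zone_wmarks().
	 */
	struct zone_model {
		unsigned long wmark_min;	/* WMARK_MIN */
		unsigned long wmark_low;	/* WMARK_LOW  = min + tmp/4 */
		unsigned long wmark_high;	/* WMARK_HIGH = min + tmp/2 */
	};

	static void setup_wmarks(struct zone_model *z, unsigned long tmp)
	{
		z->wmark_min  = tmp;
		z->wmark_low  = z->wmark_min + (tmp >> 2);
		z->wmark_high = z->wmark_min + (tmp >> 1);
	}

	int main(void)
	{
		struct zone_model z;

		setup_wmarks(&z, 1024);	/* e.g. a 1024-page share */
		printf("min=%lu low=%lu high=%lu\n",
		       z.wmark_min, z.wmark_low, z.wmark_high);
		/* Prints: min=1024 low=1280 high=1536 */
		return 0;
	}

With the patch applied, these three levels no longer carry the cma_wmark_pages() bias that the removed lines added, so a zone's CMA region stops inflating the reclaim targets for every allocation in that zone.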