/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include "internal.h"

/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */

	/* Account for isolated anon and file pages */
	unsigned long nr_anon;
	unsigned long nr_file;

	struct zone *zone;
};
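
/*
 * A rough sketch of the two scanners described above (a summary of what the
 * code below implements, not additional behaviour):
 *
 *   zone start                                                 zone end
 *   |--> migrate_pfn (movable pages isolated here) ...
 *                         ... (free pages isolated here) free_pfn <--|
 *
 * migrate_pfn only ever moves towards the end of the zone and free_pfn only
 * towards the start; compact_finished() ends the run once they meet.
 */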

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}
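
/*
 * Note on split_free_page(), used below: it removes a PageBuddy page from
 * the buddy free lists, splits it into individual order-0 pages and returns
 * how many order-0 pages were produced, which is why the caller then walks
 * that many pages forward, adding each one to the private freelist. A return
 * of 0 means the page could not be taken (for example, in this version, if
 * splitting it would push the zone below its watermark) and the scan simply
 * continues.
 */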

/* Isolate free pages onto a private freelist. Must hold zone->lock */
static unsigned long isolate_freepages_block(struct zone *zone,
				unsigned long blockpfn,
				struct list_head *freelist)
{
	unsigned long zone_end_pfn, end_pfn;
	int total_isolated = 0;
	struct page *cursor;

	/* Get the last PFN we should scan for free pages at */
	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);

	/* Find the first usable PFN in the block to initialise the page cursor */
	for (; blockpfn < end_pfn; blockpfn++) {
		if (pfn_valid_within(blockpfn))
			break;
	}
	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn))
			continue;

		if (!PageBuddy(page))
			continue;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	return total_isolated;
}
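
/*
 * The large-free-page case below accepts a block regardless of its current
 * migratetype, presumably because a block that is entirely free at
 * pageblock_order or above can be refilled with movable pages without making
 * fragmentation any worse.
 */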

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE, allow migration */
	if (migratetype == MIGRATE_MOVABLE)
		return true;

	/* Otherwise skip the block */
	return false;
}
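
/*
 * Note that isolate_freepages() below holds zone->lock with IRQs disabled
 * around the whole scan: isolate_freepages_block() manipulates the buddy
 * free lists through split_free_page(), which requires the lock to be held.
 */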

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
	high_pfn = low_pfn;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that pages within a zone's span of PFNs
		 * do not all belong to that zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = isolate_freepages_block(zone, pfn, freelist);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	/* split_free_page does not map the pages */
	list_for_each_entry(page, freelist, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}
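
/*
 * The NR_ISOLATED_ANON/NR_ISOLATED_FILE counters updated by acct_isolated()
 * below feed the too_many_isolated() throttle further down. The matching
 * decrements happen in the migration code, when a page is either migrated
 * successfully or put back on the LRU, not in this file.
 */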

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[NR_LRU_LISTS] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru) {
		int lru = page_lru_base_type(page);
		count[lru]++;
	}

	cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
	cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > inactive;
}
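
/*
 * For comparison, reclaim keeps its own too_many_isolated() in mm/vmscan.c.
 * The version above simply reports whether more pages are already isolated
 * than currently sit on the inactive LRU lists (anon and file combined); the
 * caller below waits in congestion_wait() until that is no longer the case.
 */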

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static unsigned long isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	struct list_head *migratelist = &cc->migratepages;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return 0;
	}

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;

		if (!pfn_valid_within(low_pfn))
			continue;

		/* Get the page and skip if free */
		page = pfn_to_page(low_pfn);
		if (PageBuddy(page))
			continue;

		/* Try to isolate the page */
		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
			continue;

		/* Successfully isolated */
		del_page_from_lru_list(zone, page, page_lru(page));
		list_add(&page->lru, migratelist);
		mem_cgroup_del_lru(page);
		cc->nr_migratepages++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}

	acct_isolated(zone, cc);

	spin_unlock_irq(&zone->lru_lock);
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages;
}
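
/*
 * compaction_alloc() below is the get_new_page callback handed to
 * migrate_pages() by compact_zone(); the opaque data argument is the
 * compact_control pointer cast to an unsigned long. Returning NULL tells
 * migrate_pages() that no destination page is available, so migration of
 * that particular page fails and it is later put back on the LRU.
 */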

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

static int compact_finished(struct zone *zone,
				struct compact_control *cc)
{
	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	return COMPACT_CONTINUE;
}
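
/*
 * The COMPACT_* return values come from <linux/compaction.h>. As used here,
 * COMPACT_CONTINUE keeps the compact_zone() loop going, COMPACT_COMPLETE
 * means the free and migrate scanners have met and the whole zone has been
 * covered, and COMPACT_PARTIAL is the early-exit result when a fatal signal
 * is pending.
 */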

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
	cc->free_pfn &= ~(pageblock_nr_pages-1);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;

		if (!isolate_migratepages(zone, cc))
			continue;

		nr_migrate = cc->nr_migratepages;
		migrate_pages(&cc->migratepages, compaction_alloc,
						(unsigned long)cc, 0);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);

		/* Release LRU pages not migrated */
		if (!list_empty(&cc->migratepages)) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
		}
	}

	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}
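
/*
 * compact_node() below builds a fresh compact_control on the stack for each
 * populated zone in the node. lru_add_drain_all() is called first so that
 * pages still sitting in per-CPU pagevecs reach the LRU lists, where the
 * migrate scanner can actually find and isolate them.
 */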

/* Compact all zones within a node */
static int compact_node(int nid)
{
	int zoneid;
	pg_data_t *pgdat;
	struct zone *zone;

	if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
		return -EINVAL;
	pgdat = NODE_DATA(nid);

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct compact_control cc = {
			.nr_freepages = 0,
			.nr_migratepages = 0,
		};

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		compact_zone(zone, &cc);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	return 0;
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}
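
/*
 * Usage sketch (assuming the handler above is wired up to a "compact_memory"
 * entry under /proc/sys/vm, as the sysctl_compact_memory name suggests):
 * writing any value triggers a full compaction pass over every online node,
 * e.g.
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * The value written is ignored; only the act of writing matters.
 */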