/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}
#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	unsigned long pfn;

	zone->compact_cached_migrate_pfn = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}
/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;

	if (!page)
		return;

	if (!nr_isolated) {
		unsigned long pfn = page_to_pfn(page);

		set_pageblock_skip(page);

		/* Update where compaction should restart */
		if (migrate_scanner) {
			if (!cc->finished_update_migrate &&
			    pfn > zone->compact_cached_migrate_pfn)
				zone->compact_cached_migrate_pfn = pfn;
		} else {
			if (!cc->finished_update_free &&
			    pfn < zone->compact_cached_free_pfn)
				zone->compact_cached_free_pfn = pfn;
		}
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */
static inline bool should_release_lock(spinlock_t *lock)
{
	return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out if contention
 * is severe. For sync compaction, schedule.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort.
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (should_release_lock(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			cc->contended = true;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

static inline bool compact_trylock_irqsave(spinlock_t *lock,
			unsigned long *flags, struct compact_control *cc)
{
	return compact_checklock_irqsave(lock, flags, false, cc);
}
/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(migratetype))
		return true;

	/* Otherwise skip the block */
	return false;
}
static void compact_capture_page(struct compact_control *cc)
{
	unsigned long flags;
	int mtype, mtype_low, mtype_high;

	if (!cc->page || *cc->page)
		return;

	/*
	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
	 * regardless of the migratetype of the freelist it is captured from.
	 * This is fine because the order for a high-order MIGRATE_MOVABLE
	 * allocation is typically at least a pageblock size and overall
	 * fragmentation is not impaired. Other allocation types must
	 * capture pages from their own migratelist because otherwise they
	 * could pollute other pageblocks like MIGRATE_MOVABLE with
	 * difficult-to-move pages, making fragmentation worse overall.
	 */
	if (cc->migratetype == MIGRATE_MOVABLE) {
		mtype_low = 0;
		mtype_high = MIGRATE_PCPTYPES;
	} else {
		mtype_low = cc->migratetype;
		mtype_high = cc->migratetype + 1;
	}

	/* Speculatively examine the free lists without zone lock */
	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
		int order;
		for (order = cc->order; order < MAX_ORDER; order++) {
			struct page *page;
			struct free_area *area;

			area = &(cc->zone->free_area[order]);
			if (list_empty(&area->free_list[mtype]))
				continue;

			/* Take the lock and attempt capture of the page */
			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
				return;
			if (!list_empty(&area->free_list[mtype])) {
				page = list_entry(area->free_list[mtype].next,
							struct page, lru);
				if (capture_free_page(page, cc->order, mtype)) {
					spin_unlock_irqrestore(&cc->zone->lock,
									flags);
					*cc->page = page;
					return;
				}
			}
			spin_unlock_irqrestore(&cc->zone->lock, flags);
		}
	}
}
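
/*
 * Note the double-check pattern above: the free list is examined first
 * without zone->lock (cheap, but possibly stale), the lock is then taken
 * with compact_trylock_irqsave(), and the list is re-checked under the
 * lock before capture_free_page() is attempted. Only the locked check is
 * authoritative; the speculative one merely avoids taking a hot lock for
 * lists that are probably empty.
 */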
/*
 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
 * pages inside of the pageblock (even though it may still end up isolating
 * some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long nr_strict_required = end_pfn - blockpfn;
	unsigned long flags;
	bool locked = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			continue;
		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			continue;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction, do not
		 * spin on the lock and acquire it as late as possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a suitable migration target under lock */
		if (!strict && !suitable_migration_target(page))
			break;

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			continue;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		if (!isolated && strict)
			break;
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && nr_strict_required > total_isolated)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_vm_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_vm_events(COMPACTISOLATED, total_isolated);

	return total_isolated;
}
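
/*
 * Worked example of the split accounting above (illustrative numbers): if
 * the cursor hits a free order-4 buddy page, split_free_page() breaks it
 * into 16 order-0 pages, all 16 land on the private freelist, and
 * blockpfn/cursor advance by 15 so the next loop iteration resumes just
 * past the old buddy page instead of rescanning its tail pages.
 */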
/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, causing the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle
 * of a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (i.e. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks.)
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}
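
/*
 * A sketch of the expected use (hedged; based on how CMA drives this
 * function): alloc_contig_range() in mm/page_alloc.c first migrates
 * everything movable out of the target range and then calls
 * isolate_freepages_range() over the now-free pages. A return of 0 means
 * some page could not be isolated and the contiguous allocation attempt
 * fails; a non-zero return is one past the last isolated PFN.
 */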
/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
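
/*
 * Worked example of the throttle above (illustrative numbers): in a zone
 * with 6000 inactive and 2000 active LRU pages, too_many_isolated()
 * returns true once more than (6000 + 2000) / 2 = 4000 pages sit on the
 * isolated counters, at which point sync compaction waits for congestion
 * to clear and async compaction gives up entirely.
 */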
/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	 Zone pages are in.
 * @cc:		 Compaction control structure.
 * @low_pfn:	 The first PFN of the range.
 * @end_pfn:	 The one-past-the-last PFN of the range.
 * @unevictable: true if unevictable pages may be isolated
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less than, equal to, or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	isolate_mode_t mode = 0;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		if (!valid_page)
			valid_page = page;

		/* If isolation recently failed, do not retry */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!isolation_suitable(cc, page))
			goto next_pageblock;

		/* Skip if free */
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
			cc->finished_update_migrate = true;
			goto next_pageblock;
		}

		/* Check may be lockless but that's ok as we recheck later */
		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		if (!cc->sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		if (unevictable)
			mode |= ISOLATE_UNEVICTABLE;

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		cc->finished_update_migrate = true;
		del_page_from_lru_list(page, lruvec, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
		low_pfn += pageblock_nr_pages;
		low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
		last_pageblock_nr = pageblock_nr;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_vm_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_vm_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * scanned from (or the end of the zone if starting). The low point
	 * is the end of the pageblock the migration scanner is using.
	 */
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;

	/*
	 * Take care that if the migration scanner is at the end of the zone
	 * that the free scanner does not accidentally move to the next zone
	 * in the next isolation cycle.
	 */
	high_pfn = min(low_pfn, pfn);

	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = 0;
		end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
		isolated = isolate_freepages_block(cc, pfn, end_pfn,
						   freelist, false);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated) {
			cc->finished_update_free = true;
			high_pfn = max(high_pfn, pfn);
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}
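
/*
 * A compressed picture of the two-scanner design above (a sketch, not
 * part of the original source): the migrate scanner walks upward from
 * the start of the zone collecting in-use movable pages, while this free
 * scanner walks downward from the end of the zone collecting free pages
 * to migrate into:
 *
 *	zone_start_pfn                                    zone_end_pfn
 *	|  migrate scanner -->            <-- free scanner  |
 *
 * Compaction completes when the two meet (see compact_finished()), which
 * is why high_pfn is clamped with min(low_pfn, pfn) above.
 */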
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
	if (!low_pfn || cc->contended)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}
static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to clear it should be based
		 * directly on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	if (cc->page) {
		/* Was a suitable page captured? */
		if (*cc->page)
			return COMPACT_PARTIAL;
	} else {
		unsigned int order;
		for (order = cc->order; order < MAX_ORDER; order++) {
			struct free_area *area = &zone->free_area[order];

			/* Job done if page is free of the right migratetype */
			if (!list_empty(&area->free_list[cc->migratetype]))
				return COMPACT_PARTIAL;

			/* Job done if allocation would set block type */
			if (order >= pageblock_order && area->nr_free)
				return COMPACT_PARTIAL;
		}
	}

	return COMPACT_CONTINUE;
}
/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}
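
/*
 * Worked example of the watermark arithmetic above (illustrative): for an
 * order-4 request, 2UL << 4 = 32 extra pages are demanded on top of the
 * zone's low watermark, double the 16 pages actually being allocated,
 * because source and destination pages coexist while migration copies
 * them. If even that order-0 headroom is missing, compaction is skipped
 * in favour of reclaim.
 */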
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start, but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
				MR_COMPACTION);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release LRU pages not migrated */
		if (err) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			if (err == -ENOMEM) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}

		/* Capture a page now if it is a suitable size */
		compact_capture_page(cc);
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}
static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync, bool *contended,
				 struct page **page)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
		.page = page,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}
int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist:  The zonelist used for the current allocation
 * @order:     The order of the current allocation
 * @gfp_mask:  The GFP mask of the current allocation
 * @nodemask:  The allowed nodes to allocate from
 * @sync:      Whether migration is synchronous or not
 * @contended: Return value that is true if compaction was aborted due to lock contention
 * @page:      Optionally capture a free page of the requested order during compaction
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended, struct page **page)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
						contended, page);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags))
			break;
	}

	return rc;
}
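
/*
 * For context (hedged; this is how the allocator is expected to reach the
 * entry point above): the page allocator slow path in mm/page_alloc.c
 * calls try_to_compact_pages() from __alloc_pages_direct_compact() after
 * a high-order allocation fails, first asynchronously and then, if the
 * allocation still fails after reclaim, in sync-light mode.
 */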
/* Compact all zones within a node */
static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			int ok = zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0);
			if (ok && cc->order >= zone->compact_order_failed)
				zone->compact_order_failed = cc->order + 1;
			/* Currently async compaction is never deferred. */
			else if (!ok && cc->sync)
				defer_compaction(zone, cc->order);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}

	return 0;
}

int compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
		.page = NULL,
	};

	return __compact_pgdat(pgdat, &cc);
}

static int compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
		.page = NULL,
	};

	return __compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}
/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}
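
/*
 * Example usage from userspace (any written value triggers a full
 * compaction of all online nodes):
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */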
int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
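
/*
 * Example usage from userspace (compacts a single node; node0 here is
 * illustrative):
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */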
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */