/*
 * linux/mm/vmscan.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Swap reorganised 29.12.95, Stephen Tweedie.
 * kswapd added: 7.1.96  sct
 * Removed kswapd_ctl limits, and swap out as many pages as needed
 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 * Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
	 * In this context, it doesn't matter that we scan the
	 * whole list at once. */
	int swap_cluster_max;

	int swappiness;

	int all_unreclaimable;

	int order;

	/* Which cgroup do we reclaim from */
	struct mem_cgroup *mem_cgroup;

	/* Pluggable isolate pages callback */
	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
			unsigned long *scanned, int order, int mode,
			struct zone *z, struct mem_cgroup *mem_cont,
			int active, int file);
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#define scan_global_lru(sc)	(!(sc)->mem_cgroup)
#else
#define scan_global_lru(sc)	(1)
#endif

/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	shrinker->nr = 0;
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);
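
/* Number of objects each registered shrinker is asked to scan per batch. */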
#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);

		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__func__, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrink)(0, gfp_mask);
			shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}

/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}
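
/*
 * A freeable page cache page is referenced only by the caller that
 * isolated it, the page cache itself, and optional buffer heads at
 * page->private (accounted for by the PagePrivate test below).
 */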
static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}
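
/*
 * May reclaim in this context issue writeback against this queue?
 * PF_SWAPWRITE tasks (e.g. kswapd) always may; otherwise only if the
 * device is not write-congested, or it is the queue the caller is
 * already writing to (current->backing_dev_info).
 */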
static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* Request for sync pageout. */
enum pageout_io {
	PAGEOUT_IO_ASYNC,
	PAGEOUT_IO_SYNC,
};

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
						enum pageout_io sync_writeback)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking.  To prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		/*
		 * Wait on writeback if requested to. This happens when
		 * direct reclaiming a large contiguous area and the
		 * first attempt to free a range of pages fails.
		 */
		if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
			wait_on_page_writeback(page);

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
	} else {
		__remove_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
					struct scan_control *sc,
					enum pageout_io sync_writeback)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	unsigned long nr_reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			/*
			 * Synchronous reclaim is performed in two passes,
			 * first an asynchronous pass over the list to
			 * start parallel writeback, and a second synchronous
			 * pass to wait for the IO to complete.  Wait here
			 * for any page for which writeback has already
			 * started.
			 */
			if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
				wait_on_page_writeback(page);
			else
				goto keep_locked;
		}

		referenced = page_referenced(page, 1, sc->mem_cgroup);
		/* In active use or really unfreeable?  Activate it. */
		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
					referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page))
			if (!add_to_swap(page, GFP_ATOMIC))
				goto activate_locked;
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, 0)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sync_writeback)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		unlock_page(page);
free_it:
		nr_reclaimed++;
		if (!pagevec_add(&freed_pvec, page)) {
			__pagevec_free(&freed_pvec);
			pagevec_reinit(&freed_pvec);
		}
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			remove_exclusive_swap_page_ref(page);
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_free(&freed_pvec);
	count_vm_events(PGACTIVATE, pgactivate);
	return nr_reclaimed;
}

/* LRU Isolation modes. */
#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, int mode, int file)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/*
	 * When checking the active state, we need to be sure we are
	 * dealing with comparable boolean values.  Take the logical not
	 * of each.
	 */
	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
		return ret;

	if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
		return ret;

	ret = -EBUSY;
	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 * @order:	The caller's attempted allocation order
 * @mode:	One of the LRU isolation modes
 * @file:	True [1] if isolating file [!anon] pages
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned, int order, int mode, int file)
{
	unsigned long nr_taken = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		unsigned long pfn;
		unsigned long end_pfn;
		unsigned long page_pfn;
		int zone_id;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode, file)) {
		case 0:
			list_move(&page->lru, dst);
			nr_taken++;
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}

		if (!order)
			continue;

		/*
		 * Attempt to take all pages in the order aligned region
		 * surrounding the tag page.  Only take those pages of
		 * the same active state as that tag page.  We may safely
		 * round the target page pfn down to the requested order
		 * as the mem_map is guaranteed valid out to MAX_ORDER;
		 * where a page is in a different zone we will detect it
		 * from its zone id and abort this block scan.
		 */
		zone_id = page_zone_id(page);
		page_pfn = page_to_pfn(page);
		pfn = page_pfn & ~((1 << order) - 1);
		end_pfn = pfn + (1 << order);
		for (; pfn < end_pfn; pfn++) {
			struct page *cursor_page;

			/* The target page is in the block, ignore it. */
			if (unlikely(pfn == page_pfn))
				continue;

			/* Avoid holes within the zone. */
			if (unlikely(!pfn_valid_within(pfn)))
				break;

			cursor_page = pfn_to_page(pfn);

			/* Check that we have not crossed a zone boundary. */
			if (unlikely(page_zone_id(cursor_page) != zone_id))
				continue;
			switch (__isolate_lru_page(cursor_page, mode, file)) {
			case 0:
				list_move(&cursor_page->lru, dst);
				nr_taken++;
				scan++;
				break;

			case -EBUSY:
				/* else it is being freed elsewhere */
				list_move(&cursor_page->lru, src);
			default:
				break;
			}
		}
	}

	*scanned = scan;
	return nr_taken;
}
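
/*
 * sc->isolate_pages callback for global (non-memcg) reclaim: select the
 * zone LRU list matching the requested active/file state and isolate
 * pages from it.
 */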
static unsigned long isolate_pages_global(unsigned long nr,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	int lru = LRU_BASE;
	if (active)
		lru += LRU_ACTIVE;
	if (file)
		lru += LRU_FILE;
	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
								mode, !!file);
}

/*
 * clear_active_flags() is a helper for shrink_inactive_list(), clearing
 * any active bits from the pages in the list.
 */
static unsigned long clear_active_flags(struct list_head *page_list,
					unsigned int *count)
{
	int nr_active = 0;
	int lru;
	struct page *page;

	list_for_each_entry(page, page_list, lru) {
		lru = page_is_file_cache(page);
		if (PageActive(page)) {
			lru += LRU_ACTIVE;
			ClearPageActive(page);
			nr_active++;
		}
		count[lru]++;
	}

	return nr_active;
}

/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  That flag may need
 * to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page) && get_page_unless_zero(page)) {
			int lru = LRU_BASE;
			ret = 0;
			ClearPageLRU(page);

			lru += page_is_file_cache(page) + !!PageActive(page);
			del_page_from_lru_list(zone, page, lru);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static unsigned long shrink_inactive_list(unsigned long max_scan,
			struct zone *zone, struct scan_control *sc,
			int priority, int file)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	unsigned long nr_scanned = 0;
	unsigned long nr_reclaimed = 0;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	do {
		struct page *page;
		unsigned long nr_taken;
		unsigned long nr_scan;
		unsigned long nr_freed;
		unsigned long nr_active;
		unsigned int count[NR_LRU_LISTS] = { 0, };
		int mode = ISOLATE_INACTIVE;

		/*
		 * If we need a large contiguous chunk of memory, or have
		 * trouble getting a small set of contiguous pages, we
		 * will reclaim both active and inactive pages.
		 *
		 * We use the same threshold as pageout congestion_wait below.
		 */
		if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
			mode = ISOLATE_BOTH;
		else if (sc->order && priority < DEF_PRIORITY - 2)
			mode = ISOLATE_BOTH;

		nr_taken = sc->isolate_pages(sc->swap_cluster_max,
				&page_list, &nr_scan, sc->order, mode,
				zone, sc->mem_cgroup, 0, file);
		nr_active = clear_active_flags(&page_list, count);
		__count_vm_events(PGDEACTIVATE, nr_active);

		__mod_zone_page_state(zone, NR_ACTIVE_FILE,
						-count[LRU_ACTIVE_FILE]);
		__mod_zone_page_state(zone, NR_INACTIVE_FILE,
						-count[LRU_INACTIVE_FILE]);
		__mod_zone_page_state(zone, NR_ACTIVE_ANON,
						-count[LRU_ACTIVE_ANON]);
		__mod_zone_page_state(zone, NR_INACTIVE_ANON,
						-count[LRU_INACTIVE_ANON]);

		if (scan_global_lru(sc)) {
			zone->pages_scanned += nr_scan;
			zone->recent_scanned[0] += count[LRU_INACTIVE_ANON];
			zone->recent_scanned[0] += count[LRU_ACTIVE_ANON];
			zone->recent_scanned[1] += count[LRU_INACTIVE_FILE];
			zone->recent_scanned[1] += count[LRU_ACTIVE_FILE];
		}
		spin_unlock_irq(&zone->lru_lock);

		nr_scanned += nr_scan;
		nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);

		/*
		 * If we are direct reclaiming for contiguous pages and we do
		 * not reclaim everything in the list, try again and wait
		 * for IO to complete. This will stall high-order allocations
		 * but that should be acceptable to the caller
		 */
		if (nr_freed < nr_taken && !current_is_kswapd() &&
					sc->order > PAGE_ALLOC_COSTLY_ORDER) {
			congestion_wait(WRITE, HZ/10);

			/*
			 * The attempt at page out may have made some
			 * of the pages active, mark them inactive again.
			 */
			nr_active = clear_active_flags(&page_list, count);
			count_vm_events(PGDEACTIVATE, nr_active);

			nr_freed += shrink_page_list(&page_list, sc,
							PAGEOUT_IO_SYNC);
		}

		nr_reclaimed += nr_freed;
		local_irq_disable();
		if (current_is_kswapd()) {
			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
			__count_vm_events(KSWAPD_STEAL, nr_freed);
		} else if (scan_global_lru(sc))
			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);

		__count_zone_vm_events(PGSTEAL, zone, nr_freed);

		if (nr_taken == 0)
			goto done;

		spin_lock(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			VM_BUG_ON(PageLRU(page));
			SetPageLRU(page);
			list_del(&page->lru);
			add_page_to_lru_list(zone, page, page_lru(page));
			if (PageActive(page) && scan_global_lru(sc)) {
				int file = !!page_is_file_cache(page);
				zone->recent_rotated[file]++;
			}
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	} while (nr_scanned < max_scan);
	spin_unlock(&zone->lru_lock);
done:
	local_irq_enable();
	pagevec_release(&pvec);
	return nr_reclaimed;
}

/*
 * We are about to scan this zone at a certain priority level.  If that priority
 * level is smaller (ie: more urgent) than the previous priority, then note
 * that priority level within the zone.  This is done so that when the next
 * process comes in to scan this zone, it will immediately start out at this
 * priority level rather than having to build up its own scanning priority.
 * Here, this priority affects only the reclaim-mapped threshold.
 */
static inline void note_zone_scanning_priority(struct zone *zone, int priority)
{
	if (priority < zone->prev_priority)
		zone->prev_priority = priority;
}
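
/*
 * Heuristic: consider the zone close to OOM once roughly three times its
 * LRU size has been scanned since zone->pages_scanned was last reset.
 */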
static inline int zone_is_near_oom(struct zone *zone)
{
	return zone->pages_scanned >= (zone_lru_pages(zone) * 3);
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
			struct scan_control *sc, int priority, int file)
{
	unsigned long pgmoved;
	int pgdeactivate = 0;
	unsigned long pgscanned;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);
	struct page *page;
	struct pagevec pvec;
	enum lru_list lru;

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
					ISOLATE_ACTIVE, zone,
					sc->mem_cgroup, 1, file);
	/*
	 * zone->pages_scanned is used to detect the zone's OOM state.
	 * mem_cgroup remembers nr_scan by itself.
	 */
	if (scan_global_lru(sc)) {
		zone->pages_scanned += pgscanned;
		zone->recent_scanned[!!file] += pgmoved;
	}

	if (file)
		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
	else
		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
	spin_unlock_irq(&zone->lru_lock);

	pgmoved = 0;
	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);

		/* page_referenced clears PageReferenced */
		if (page_mapping_inuse(page) &&
		    page_referenced(page, 0, sc->mem_cgroup))
			pgmoved++;

		list_add(&page->lru, &l_inactive);
	}

	/*
	 * Count referenced pages from currently used mappings as
	 * rotated, even though they are moved to the inactive list.
	 * This helps balance scan pressure between file and anonymous
	 * pages in get_scan_ratio.
	 */
	zone->recent_rotated[!!file] += pgmoved;

	/*
	 * Move the pages to the [file or anon] inactive list.
	 */
	pagevec_init(&pvec, 1);
	pgmoved = 0;
	lru = LRU_BASE + file * LRU_FILE;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		VM_BUG_ON(!PageActive(page));
		ClearPageActive(page);

		list_move(&page->lru, &zone->lru[lru].list);
		mem_cgroup_move_lists(page, false);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}
	__count_zone_vm_events(PGREFILL, zone, pgscanned);
	__count_vm_events(PGDEACTIVATE, pgdeactivate);
	spin_unlock_irq(&zone->lru_lock);
	if (vm_swap_full())
		pagevec_swap_free(&pvec);

	pagevec_release(&pvec);
}
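
/*
 * Shrink one LRU list by up to nr_to_scan pages.  Active file pages (and
 * active anon pages when the inactive anon list runs low) are only aged
 * onto the corresponding inactive list; everything else is reclaimed via
 * shrink_inactive_list().
 */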
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
	struct zone *zone, struct scan_control *sc, int priority)
{
	int file = is_file_lru(lru);

	if (lru == LRU_ACTIVE_FILE) {
		shrink_active_list(nr_to_scan, zone, sc, priority, file);
		return 0;
	}

	if (lru == LRU_ACTIVE_ANON &&
	    (!scan_global_lru(sc) || inactive_anon_is_low(zone))) {
		shrink_active_list(nr_to_scan, zone, sc, priority, file);
		return 0;
	}
	return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
}

/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative value of each set of LRU lists is determined
 * by looking at the fraction of the pages scanned that we rotated back
 * onto the active list instead of evicting.
 *
 * percent[0] specifies how much pressure to put on ram/swap backed
 * memory, while percent[1] determines pressure on the file LRUs.
 */
static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
					unsigned long *percent)
{
	unsigned long anon, file, free;
	unsigned long anon_prio, file_prio;
	unsigned long ap, fp;

	anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
		zone_page_state(zone, NR_INACTIVE_ANON);
	file  = zone_page_state(zone, NR_ACTIVE_FILE) +
		zone_page_state(zone, NR_INACTIVE_FILE);
	free  = zone_page_state(zone, NR_FREE_PAGES);

	/* If we have no swap space, do not bother scanning anon pages. */
	if (nr_swap_pages <= 0) {
		percent[0] = 0;
		percent[1] = 100;
		return;
	}

	/* If we have very few page cache pages, force-scan anon pages. */
	if (unlikely(file + free <= zone->pages_high)) {
		percent[0] = 100;
		percent[1] = 0;
		return;
	}

	/*
	 * OK, so we have swap space and a fair amount of page cache
	 * pages.  We use the recently rotated / recently scanned
	 * ratios to determine how valuable each cache is.
	 *
	 * Because workloads change over time (and to avoid overflow)
	 * we keep these statistics as a floating average, which ends
	 * up weighing recent references more than old ones.
	 *
	 * anon in [0], file in [1]
	 */
	if (unlikely(zone->recent_scanned[0] > anon / 4)) {
		spin_lock_irq(&zone->lru_lock);
		zone->recent_scanned[0] /= 2;
		zone->recent_rotated[0] /= 2;
		spin_unlock_irq(&zone->lru_lock);
	}

	if (unlikely(zone->recent_scanned[1] > file / 4)) {
		spin_lock_irq(&zone->lru_lock);
		zone->recent_scanned[1] /= 2;
		zone->recent_rotated[1] /= 2;
		spin_unlock_irq(&zone->lru_lock);
	}

	/*
	 * With swappiness at 100, anonymous and file have the same priority.
	 * This scanning priority is essentially the inverse of IO cost.
	 */
	anon_prio = sc->swappiness;
	file_prio = 200 - sc->swappiness;

	/*
	 *                  anon       recent_rotated[0]
	 * %anon = 100 * ----------- / ----------------- * IO cost
	 *               anon + file      rotate_sum
	 */
	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
	ap /= zone->recent_rotated[0] + 1;

	fp = (file_prio + 1) * (zone->recent_scanned[1] + 1);
	fp /= zone->recent_rotated[1] + 1;

	/* Normalize to percentages */
	percent[0] = 100 * ap / (ap + fp + 1);
	percent[1] = 100 - percent[0];
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static unsigned long shrink_zone(int priority, struct zone *zone,
				struct scan_control *sc)
{
	unsigned long nr[NR_LRU_LISTS];
	unsigned long nr_to_scan;
	unsigned long nr_reclaimed = 0;
	unsigned long percent[2];	/* anon @ 0; file @ 1 */
	enum lru_list l;

	get_scan_ratio(zone, sc, percent);

	for_each_lru(l) {
		if (scan_global_lru(sc)) {
			int file = is_file_lru(l);
			int scan;

			/*
			 * Add one to nr_to_scan just to make sure that the
			 * kernel will slowly sift through each list.
			 */
			scan = zone_page_state(zone, NR_LRU_BASE + l);
			if (priority) {
				scan >>= priority;
				scan = (scan * percent[file]) / 100;
			}
			zone->lru[l].nr_scan += scan + 1;
			nr[l] = zone->lru[l].nr_scan;
			if (nr[l] >= sc->swap_cluster_max)
				zone->lru[l].nr_scan = 0;
			else
				nr[l] = 0;
		} else {
			/*
			 * This reclaim occurs not because of a zone memory
			 * shortage but because the memory controller hit its
			 * limit.  Don't modify zone reclaim related data.
			 */
			nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
								priority, l);
		}
	}

	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
					nr[LRU_INACTIVE_FILE]) {
		for_each_lru(l) {
			if (nr[l]) {
				nr_to_scan = min(nr[l],
					(unsigned long)sc->swap_cluster_max);
				nr[l] -= nr_to_scan;

				nr_reclaimed += shrink_list(l, nr_to_scan,
							zone, sc, priority);
			}
		}
	}

	/*
	 * Even if we did not try to evict anon pages at all, we want to
	 * rebalance the anon lru active/inactive ratio.
	 */
	if (!scan_global_lru(sc) || inactive_anon_is_low(zone))
		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
	else if (!scan_global_lru(sc))
		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);

	throttle_vm_writeout(sc->gfp_mask);
	return nr_reclaimed;
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * Returns the number of reclaimed pages.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
					struct scan_control *sc)
{
	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
	unsigned long nr_reclaimed = 0;
	struct zoneref *z;
	struct zone *zone;

	sc->all_unreclaimable = 1;
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		if (!populated_zone(zone))
			continue;
		/*
		 * Take care that memory controller reclaim has only a small
		 * influence on the global LRU.
		 */
		if (scan_global_lru(sc)) {
			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
				continue;
			note_zone_scanning_priority(zone, priority);

			if (zone_is_all_unreclaimable(zone) &&
						priority != DEF_PRIORITY)
				continue;	/* Let kswapd poll it */
			sc->all_unreclaimable = 0;
		} else {
			/*
			 * Ignore cpuset limitation here. We just want to reduce
			 * # of used pages by us regardless of memory shortage.
			 */
			sc->all_unreclaimable = 0;
			mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
							priority);
		}

		nr_reclaimed += shrink_zone(priority, zone, sc);
	}

	return nr_reclaimed;
}
  1239. /*
  1240. * This is the main entry point to direct page reclaim.
  1241. *
  1242. * If a full scan of the inactive list fails to free enough memory then we
  1243. * are "out of memory" and something needs to be killed.
  1244. *
  1245. * If the caller is !__GFP_FS then the probability of a failure is reasonably
  1246. * high - the zone may be full of dirty or under-writeback pages, which this
  1247. * caller can't do much about. We kick pdflush and take explicit naps in the
  1248. * hope that some of these pages can be written. But if the allocating task
  1249. * holds filesystem locks which prevent writeout this might not work, and the
  1250. * allocation attempt will fail.
  1251. *
  1252. * returns: 0, if no pages reclaimed
  1253. * else, the number of pages reclaimed
  1254. */
  1255. static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
  1256. struct scan_control *sc)
  1257. {
  1258. int priority;
  1259. unsigned long ret = 0;
  1260. unsigned long total_scanned = 0;
  1261. unsigned long nr_reclaimed = 0;
  1262. struct reclaim_state *reclaim_state = current->reclaim_state;
  1263. unsigned long lru_pages = 0;
  1264. struct zoneref *z;
  1265. struct zone *zone;
  1266. enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
  1267. delayacct_freepages_start();
  1268. if (scan_global_lru(sc))
  1269. count_vm_event(ALLOCSTALL);
  1270. /*
  1271. * mem_cgroup will not do shrink_slab.
  1272. */
  1273. if (scan_global_lru(sc)) {
  1274. for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
  1275. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  1276. continue;
  1277. lru_pages += zone_lru_pages(zone);
  1278. }
  1279. }
  1280. for (priority = DEF_PRIORITY; priority >= 0; priority--) {
  1281. sc->nr_scanned = 0;
  1282. if (!priority)
  1283. disable_swap_token();
  1284. nr_reclaimed += shrink_zones(priority, zonelist, sc);
  1285. /*
  1286. * Don't shrink slabs when reclaiming memory from
  1287. * over limit cgroups
  1288. */
  1289. if (scan_global_lru(sc)) {
  1290. shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
  1291. if (reclaim_state) {
  1292. nr_reclaimed += reclaim_state->reclaimed_slab;
  1293. reclaim_state->reclaimed_slab = 0;
  1294. }
  1295. }
  1296. total_scanned += sc->nr_scanned;
  1297. if (nr_reclaimed >= sc->swap_cluster_max) {
  1298. ret = nr_reclaimed;
  1299. goto out;
  1300. }
  1301. /*
  1302. * Try to write back as many pages as we just scanned. This
  1303. * tends to cause slow streaming writers to write data to the
  1304. * disk smoothly, at the dirtying rate, which is nice. But
  1305. * that's undesirable in laptop mode, where we *want* lumpy
  1306. * writeout. So in laptop mode, write out the whole world.
  1307. */
  1308. if (total_scanned > sc->swap_cluster_max +
  1309. sc->swap_cluster_max / 2) {
  1310. wakeup_pdflush(laptop_mode ? 0 : total_scanned);
  1311. sc->may_writepage = 1;
  1312. }
                /* Take a nap, wait for some writeback to complete */
                if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
                        congestion_wait(WRITE, HZ/10);
        }
        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (!sc->all_unreclaimable && scan_global_lru(sc))
                ret = nr_reclaimed;
out:
        /*
         * Now that we've scanned all the zones at this priority level, note
         * that level within the zone so that the next thread which performs
         * scanning of this zone will immediately start out at this priority
         * level. This affects only the decision whether or not to bring
         * mapped pages onto the inactive list.
         */
        if (priority < 0)
                priority = 0;

        if (scan_global_lru(sc)) {
                for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {

                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                continue;

                        zone->prev_priority = priority;
                }
        } else
                mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);

        delayacct_freepages_end();

        return ret;
}
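/*
 * Editorial note: try_to_free_pages() below is the entry point for direct
 * reclaim; the page allocator calls it when a normal allocation cannot find
 * enough free pages. It fills in a default scan_control and hands off to
 * do_try_to_free_pages().
 *
 * Illustrative caller-side sketch (not taken from this file; assumes the
 * node_zonelist() helper from gfp.h is available):
 *
 *	struct zonelist *zl = node_zonelist(numa_node_id(), GFP_KERNEL);
 *	unsigned long freed = try_to_free_pages(zl, 0, GFP_KERNEL);
 */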
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                                                gfp_t gfp_mask)
{
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .may_swap = 1,
                .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
                .isolate_pages = isolate_pages_global,
        };

        return do_try_to_free_pages(zonelist, &sc);
}
#ifdef CONFIG_CGROUP_MEM_RES_CTLR

unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                                gfp_t gfp_mask)
{
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
                .may_swap = 1,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = vm_swappiness,
                .order = 0,
                .mem_cgroup = mem_cont,
                .isolate_pages = mem_cgroup_isolate_pages,
        };
        struct zonelist *zonelist;

        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
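        /*
         * Editorial note: the mask built above keeps the caller's
         * reclaim-behaviour bits (GFP_RECLAIM_MASK) and takes the remaining
         * bits, notably the zone placement modifiers, from
         * GFP_HIGHUSER_MOVABLE, so memcg reclaim can use the full range of
         * zones that hold user pages regardless of what the charging
         * allocation asked for.
         */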
        zonelist = NODE_DATA(numa_node_id())->node_zonelists;
        return do_try_to_free_pages(zonelist, &sc);
}
#endif
/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim. Mark the zone as
 * dead and from now on, only perform a short scan. Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction. It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones. This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
{
        int all_zones_ok;
        int priority;
        int i;
        unsigned long total_scanned;
        unsigned long nr_reclaimed;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_swap = 1,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
                .isolate_pages = isolate_pages_global,
        };
        /*
         * temp_priority is used to remember the scanning priority at which
         * this zone was successfully refilled to free_pages == pages_high.
         */
        int temp_priority[MAX_NR_ZONES];

loop_again:
        total_scanned = 0;
        nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
        count_vm_event(PAGEOUTRUN);

        for (i = 0; i < pgdat->nr_zones; i++)
                temp_priority[i] = DEF_PRIORITY;

        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
                unsigned long lru_pages = 0;

                /* The swap token gets in the way of swapout... */
                if (!priority)
                        disable_swap_token();

                all_zones_ok = 1;

                /*
                 * Scan in the highmem->dma direction for the highest
                 * zone which needs scanning
                 */
                for (i = pgdat->nr_zones - 1; i >= 0; i--) {
                        struct zone *zone = pgdat->node_zones + i;

                        if (!populated_zone(zone))
                                continue;

                        if (zone_is_all_unreclaimable(zone) &&
                            priority != DEF_PRIORITY)
                                continue;

                        /*
                         * Do some background aging of the anon list, to give
                         * pages a chance to be referenced before reclaiming.
                         */
                        if (inactive_anon_is_low(zone))
                                shrink_active_list(SWAP_CLUSTER_MAX, zone,
                                                        &sc, priority, 0);

                        if (!zone_watermark_ok(zone, order, zone->pages_high,
                                               0, 0)) {
                                end_zone = i;
                                break;
                        }
                }
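                /*
                 * Editorial note: if the loop above ran to completion, i is
                 * now -1, meaning every populated zone was already above its
                 * pages_high watermark, so there is no reclaim for kswapd to
                 * do and we bail out below.
                 */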
                if (i < 0)
                        goto out;

                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;

                        lru_pages += zone_lru_pages(zone);
                }

                /*
                 * Now scan the zone in the dma->highmem direction, stopping
                 * at the last zone which needs scanning.
                 *
                 * We do this because the page allocator works in the opposite
                 * direction. This prevents the page allocator from allocating
                 * pages behind kswapd's direction of progress, which would
                 * cause too much scanning of the lower zones.
                 */
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab;

                        if (!populated_zone(zone))
                                continue;

                        if (zone_is_all_unreclaimable(zone) &&
                            priority != DEF_PRIORITY)
                                continue;

                        if (!zone_watermark_ok(zone, order, zone->pages_high,
                                               end_zone, 0))
                                all_zones_ok = 0;
                        temp_priority[i] = priority;
                        sc.nr_scanned = 0;
                        note_zone_scanning_priority(zone, priority);
                        /*
                         * We put equal pressure on every zone, unless one
                         * zone has way too many pages free already.
                         */
                        if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
                                               end_zone, 0))
                                nr_reclaimed += shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
                                                lru_pages);
                        nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
                        if (zone_is_all_unreclaimable(zone))
                                continue;
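                        /*
                         * Editorial note on the heuristic below: zero slab
                         * progress combined with having scanned the zone's
                         * LRU pages roughly six times over marks the zone
                         * ZONE_ALL_UNRECLAIMABLE, so later passes (priority
                         * != DEF_PRIORITY) skip it instead of rescanning
                         * pinned pages.
                         */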
                        if (nr_slab == 0 && zone->pages_scanned >=
                                                (zone_lru_pages(zone) * 6))
                                zone_set_flag(zone,
                                              ZONE_ALL_UNRECLAIMABLE);
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
                         * even in laptop mode
                         */
                        if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
                            total_scanned > nr_reclaimed + nr_reclaimed / 2)
                                sc.may_writepage = 1;
                }
                if (all_zones_ok)
                        break;          /* kswapd: all done */
                /*
                 * OK, kswapd is getting into trouble. Take a nap, then take
                 * another pass across the zones.
                 */
                if (total_scanned && priority < DEF_PRIORITY - 2)
                        congestion_wait(WRITE, HZ/10);

                /*
                 * We do this so kswapd doesn't build up large priorities for
                 * example when it is freeing in parallel with allocators. It
                 * matches the direct reclaim path behaviour in terms of impact
                 * on zone->*_priority.
                 */
                if (nr_reclaimed >= SWAP_CLUSTER_MAX)
                        break;
        }
out:
        /*
         * Note within each zone the priority level at which this zone was
         * brought into a happy state. So that the next thread which scans
         * this zone will start out at that priority level.
         */
        for (i = 0; i < pgdat->nr_zones; i++) {
                struct zone *zone = pgdat->node_zones + i;

                zone->prev_priority = temp_priority[i];
        }
        if (!all_zones_ok) {
                cond_resched();

                try_to_freeze();

                goto loop_again;
        }

        return nr_reclaimed;
}
/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
        unsigned long order;
        pg_data_t *pgdat = (pg_data_t*)p;
        struct task_struct *tsk = current;
        DEFINE_WAIT(wait);
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
        node_to_cpumask_ptr(cpumask, pgdat->node_id);

        if (!cpus_empty(*cpumask))
                set_cpus_allowed_ptr(tsk, cpumask);
        current->reclaim_state = &reclaim_state;

        /*
         * Tell the memory management that we're a "memory allocator",
         * and that if we need more memory we should get access to it
         * regardless (see "__alloc_pages()"). "kswapd" should
         * never get caught in the normal page freeing logic.
         *
         * (Kswapd normally doesn't need memory anyway, but sometimes
         * you need a small amount of memory in order to be able to
         * page out something else, and this flag essentially protects
         * us from recursively trying to free more memory as we're
         * trying to free the first piece of memory in the first place).
         */
        tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
        set_freezable();

        order = 0;
        for ( ; ; ) {
                unsigned long new_order;

                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
                new_order = pgdat->kswapd_max_order;
                pgdat->kswapd_max_order = 0;
                if (order < new_order) {
                        /*
                         * Don't sleep if someone wants a larger 'order'
                         * allocation
                         */
                        order = new_order;
                } else {
                        if (!freezing(current))
                                schedule();

                        order = pgdat->kswapd_max_order;
                }
                finish_wait(&pgdat->kswapd_wait, &wait);

                if (!try_to_freeze()) {
                        /* We can speed up thawing tasks if we don't call
                         * balance_pgdat after returning from the refrigerator
                         */
                        balance_pgdat(pgdat, order);
                }
        }
        return 0;
}
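/*
 * Editorial note on the allocator/kswapd handshake: wakeup_kswapd() below
 * records the largest allocation order seen in pgdat->kswapd_max_order and
 * wakes pgdat->kswapd_wait; kswapd() picks that order up when it wakes, so a
 * single wakeup services the biggest outstanding request.
 */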
/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
        pg_data_t *pgdat;

        if (!populated_zone(zone))
                return;

        pgdat = zone->zone_pgdat;
        if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                return;
        if (!waitqueue_active(&pgdat->kswapd_wait))
                return;
        wake_up_interruptible(&pgdat->kswapd_wait);
}
unsigned long global_lru_pages(void)
{
        return global_page_state(NR_ACTIVE_ANON)
                + global_page_state(NR_ACTIVE_FILE)
                + global_page_state(NR_INACTIVE_ANON)
                + global_page_state(NR_INACTIVE_FILE);
}
#ifdef CONFIG_PM
/*
 * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
 * from LRU lists system-wide, for given pass and priority, and returns the
 * number of reclaimed pages
 *
 * For pass > 3 we also try to shrink the LRU lists that contain a few pages
 */
static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
                                      int pass, struct scan_control *sc)
{
        struct zone *zone;
        unsigned long nr_to_scan, ret = 0;
        enum lru_list l;

        for_each_zone(zone) {

                if (!populated_zone(zone))
                        continue;

                if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;

                for_each_lru(l) {
                        /* For pass = 0 we don't shrink the active list */
                        if (pass == 0 &&
                            (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
                                continue;

                        zone->lru[l].nr_scan +=
                                (zone_page_state(zone, NR_LRU_BASE + l)
                                                                >> prio) + 1;
  1667. if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
  1668. zone->lru[l].nr_scan = 0;
  1669. nr_to_scan = min(nr_pages,
  1670. zone_page_state(zone,
  1671. NR_LRU_BASE + l));
  1672. ret += shrink_list(l, nr_to_scan, zone,
  1673. sc, prio);
  1674. if (ret >= nr_pages)
  1675. return ret;
  1676. }
  1677. }
  1678. }
  1679. return ret;
  1680. }
/*
 * Try to free `nr_pages' of memory, system-wide, and return the number of
 * freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_pages)
{
        unsigned long lru_pages, nr_slab;
        unsigned long ret = 0;
        int pass;
        struct reclaim_state reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_swap = 0,
                .swap_cluster_max = nr_pages,
                .may_writepage = 1,
                .swappiness = vm_swappiness,
                .isolate_pages = isolate_pages_global,
        };

        current->reclaim_state = &reclaim_state;

        lru_pages = global_lru_pages();
        nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
        /* If slab caches are huge, it's better to hit them first */
        while (nr_slab >= lru_pages) {
                reclaim_state.reclaimed_slab = 0;
                shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
                if (!reclaim_state.reclaimed_slab)
                        break;

                ret += reclaim_state.reclaimed_slab;
                if (ret >= nr_pages)
                        goto out;

                nr_slab -= reclaim_state.reclaimed_slab;
        }
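        /*
         * Editorial note: the loop above keeps shaking the reclaimable slab
         * as long as it is at least as large as the LRU page count sampled at
         * entry and shrink_slab() keeps making progress; it stops as soon as
         * a pass frees nothing or the request has been satisfied.
         */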
        /*
         * We try to shrink LRUs in 5 passes:
         * 0 = Reclaim from inactive_list only
         * 1 = Reclaim from active list but don't reclaim mapped
         * 2 = 2nd pass of type 1
         * 3 = Reclaim mapped (normal reclaim)
         * 4 = 2nd pass of type 3
         */
        for (pass = 0; pass < 5; pass++) {
                int prio;

                /* Force reclaiming mapped pages in the passes #3 and #4 */
                if (pass > 2) {
                        sc.may_swap = 1;
                        sc.swappiness = 100;
                }

                for (prio = DEF_PRIORITY; prio >= 0; prio--) {
                        unsigned long nr_to_scan = nr_pages - ret;

                        sc.nr_scanned = 0;
                        ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
                        if (ret >= nr_pages)
                                goto out;

                        reclaim_state.reclaimed_slab = 0;
                        shrink_slab(sc.nr_scanned, sc.gfp_mask,
                                        global_lru_pages());
                        ret += reclaim_state.reclaimed_slab;
                        if (ret >= nr_pages)
                                goto out;

                        if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
                                congestion_wait(WRITE, HZ / 10);
                }
        }

        /*
         * If ret = 0, we could not shrink LRUs, but there may be something
         * in slab caches
         */
        if (!ret) {
                do {
                        reclaim_state.reclaimed_slab = 0;
                        shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
                        ret += reclaim_state.reclaimed_slab;
                } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
        }

out:
        current->reclaim_state = NULL;

        return ret;
}
#endif
/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness. So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
{
        int nid;

        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
                for_each_node_state(nid, N_HIGH_MEMORY) {
                        pg_data_t *pgdat = NODE_DATA(nid);
                        node_to_cpumask_ptr(mask, pgdat->node_id);

                        if (any_online_cpu(*mask) < nr_cpu_ids)
                                /* One of our CPUs online: restore mask */
                                set_cpus_allowed_ptr(pgdat->kswapd, mask);
                }
        }
        return NOTIFY_OK;
}
/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
        pg_data_t *pgdat = NODE_DATA(nid);
        int ret = 0;

        if (pgdat->kswapd)
                return 0;

        pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
        if (IS_ERR(pgdat->kswapd)) {
                /* failure at boot is fatal */
                BUG_ON(system_state == SYSTEM_BOOTING);
                printk("Failed to start kswapd on node %d\n", nid);
                ret = -1;
        }
        return ret;
}
static int __init kswapd_init(void)
{
        int nid;

        swap_setup();
        for_each_node_state(nid, N_HIGH_MEMORY)
                kswapd_run(nid);
        hotcpu_notifier(cpu_callback, 0);
        return 0;
}

module_init(kswapd_init)
#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)     /* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)     /* Swap pages out during reclaim */

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4
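/*
 * Editorial note: the fraction scanned at a given priority is roughly
 * 1 / (1 << priority), so ZONE_RECLAIM_PRIORITY == 4 means the first pass
 * looks at about 1/16th of the zone; __zone_reclaim() then lowers the
 * priority, scanning larger fractions, until nr_pages have been reclaimed.
 */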
/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;
/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
        /* Minimum pages needed in order to stay on node */
        const unsigned long nr_pages = 1 << order;
        struct task_struct *p = current;
        struct reclaim_state reclaim_state;
        int priority;
        unsigned long nr_reclaimed = 0;
        struct scan_control sc = {
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
                .swap_cluster_max = max_t(unsigned long, nr_pages,
                                        SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
                .swappiness = vm_swappiness,
                .isolate_pages = isolate_pages_global,
        };
        unsigned long slab_reclaimable;

        disable_swap_token();
        cond_resched();
        /*
         * We need to be able to allocate from the reserves for RECLAIM_SWAP
         * and we also need to be able to write out pages for RECLAIM_WRITE
         * and RECLAIM_SWAP.
         */
        p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;

        if (zone_page_state(zone, NR_FILE_PAGES) -
                zone_page_state(zone, NR_FILE_MAPPED) >
                zone->min_unmapped_pages) {
                /*
                 * Free memory by calling shrink zone with increasing
                 * priorities until we have enough memory freed.
                 */
                priority = ZONE_RECLAIM_PRIORITY;
                do {
                        note_zone_scanning_priority(zone, priority);
                        nr_reclaimed += shrink_zone(priority, zone, &sc);
                        priority--;
                } while (priority >= 0 && nr_reclaimed < nr_pages);
        }

        slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
        if (slab_reclaimable > zone->min_slab_pages) {
                /*
                 * shrink_slab() does not currently allow us to determine how
                 * many pages were freed in this zone. So we take the current
                 * number of slab pages and shake the slab until it is reduced
                 * by the same nr_pages that we used for reclaiming unmapped
                 * pages.
                 *
                 * Note that shrink_slab will free memory on all zones and may
                 * take a long time.
                 */
                while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
                        zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
                                slab_reclaimable - nr_pages)
                        ;
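                /*
                 * Editorial note: the loop body above is deliberately empty;
                 * all of the work happens in the while () condition, which
                 * keeps calling shrink_slab() until it stops making progress
                 * or enough slab pages have been freed.
                 */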
                /*
                 * Update nr_reclaimed by the number of slab pages we
                 * reclaimed from this zone.
                 */
                nr_reclaimed += slab_reclaimable -
                        zone_page_state(zone, NR_SLAB_RECLAIMABLE);
        }

        p->reclaim_state = NULL;
        current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
        return nr_reclaimed >= nr_pages;
}
int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
        int node_id;
        int ret;

        /*
         * Zone reclaim reclaims unmapped file backed pages and
         * slab pages if we are over the defined limits.
         *
         * A small portion of unmapped file backed pages is needed for
         * file I/O otherwise pages read by file I/O will be immediately
         * thrown out if the zone is overallocated. So we do not reclaim
         * if less than a specified percentage of the zone is used by
         * unmapped file backed pages.
         */
        if (zone_page_state(zone, NR_FILE_PAGES) -
            zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
            && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
                        <= zone->min_slab_pages)
                return 0;

        if (zone_is_all_unreclaimable(zone))
                return 0;

        /*
         * Do not scan if the allocation should not be delayed.
         */
        if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
                return 0;

        /*
         * Only run zone reclaim on the local zone or on zones that do not
         * have associated processors. This will favor the local processor
         * over remote processors and spread off node memory allocations
         * as wide as possible.
         */
        node_id = zone_to_nid(zone);
        if (node_state(node_id, N_CPU) && node_id != numa_node_id())
                return 0;

        if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
                return 0;
        ret = __zone_reclaim(zone, gfp_mask, order);
        zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

        return ret;
}
#endif