/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	unsigned long hibernation_mode;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	int swappiness;

	int order;

	/*
	 * Reclaim enough contiguous memory rather than just a sufficient
	 * amount of memory, i.e. the mode for high-order allocations.
	 */
	bool lumpy_reclaim_mode;

	/* Which cgroup do we reclaim from */
	struct mem_cgroup *mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;
};
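/*
 * For orientation, a sketch of how a direct-reclaim caller such as
 * try_to_free_pages() typically fills this structure in (the field
 * values here are illustrative of this kernel generation, not
 * authoritative):
 *
 *	struct scan_control sc = {
 *		.gfp_mask	= gfp_mask,
 *		.nr_to_reclaim	= SWAP_CLUSTER_MAX,
 *		.may_writepage	= !laptop_mode,
 *		.may_unmap	= 1,
 *		.may_swap	= 1,
 *		.swappiness	= vm_swappiness,
 *		.order		= order,
 *		.mem_cgroup	= NULL,		(global reclaim)
 *		.nodemask	= nodemask,
 *	};
 */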
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#define scanning_global_lru(sc)	(!(sc)->mem_cgroup)
#else
#define scanning_global_lru(sc)	(1)
#endif

static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
						  struct scan_control *sc)
{
	if (!scanning_global_lru(sc))
		return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);

	return &zone->reclaim_stat;
}

static unsigned long zone_nr_lru_pages(struct zone *zone,
				struct scan_control *sc, enum lru_list lru)
{
	if (!scanning_global_lru(sc))
		return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);

	return zone_page_state(zone, NR_LRU_BASE + lru);
}
/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	shrinker->nr = 0;
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);
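/*
 * Usage sketch for the pair above, following the ->shrink() callback
 * convention that shrink_slab() below relies on (return the number of
 * freeable objects, scan nr_to_scan of them when asked).  The names
 * "example_count" and "example_evict" are hypothetical hooks into some
 * cache, not functions defined in this tree:
 *
 *	static int example_shrink(struct shrinker *s, int nr_to_scan,
 *				  gfp_t gfp_mask)
 *	{
 *		if (nr_to_scan)
 *			example_evict(nr_to_scan);
 *		return example_count();	(freeable objects remaining)
 *	}
 *
 *	static struct shrinker example_shrinker = {
 *		.shrink	= example_shrink,
 *		.seeks	= DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&example_shrinker);	(at init)
 *	unregister_shrinker(&example_shrinker);	(at teardown)
 */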
#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace an LRU page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the LRU and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure
 * on slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
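/*
 * Worked example of the aging arithmetic below, under assumed numbers:
 * scanned = 128 LRU pages, shrinker->seeks = DEFAULT_SEEKS (2),
 * max_pass = 1000 freeable objects, lru_pages = 10000.  Then
 *
 *	delta = (4 * 128 / 2) * 1000 / (10000 + 1) ~= 25
 *
 * so about 25 objects are queued for scanning on this pass; the cache
 * is aged in proportion to the fraction of the LRU just scanned.
 */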
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			  unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass;

		max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "shrink_slab: %pF negative objects to "
			       "delete nr=%ld\n",
			       shrinker->shrink, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
			shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
							 gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}
static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	return page_count(page) - page_has_private(page) == 2;
}
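/*
 * Concretely: an isolated pagecache page with buffer heads has
 * page_count == 3 (isolating caller + radix tree + page->private) and
 * page_has_private == 1, so 3 - 1 == 2 and the page counts as freeable.
 * Any additional reference pushes the result past 2.
 */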
static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}
/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller
 * has __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page_nosync(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* Request for sync pageout. */
enum pageout_io {
	PAGEOUT_IO_ASYNC,
	PAGEOUT_IO_SYNC,
};

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;
/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 enum pageout_io sync_writeback)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_aio_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		/*
		 * Wait on writeback if requested to.  This happens when
		 * direct reclaiming a large contiguous area and the
		 * first attempt to free a range of pages fails.
		 */
		if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
			wait_on_page_writeback(page);

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page,
			trace_reclaim_flags(page, sync_writeback));
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}
/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swapcache_free(swap, page);
	} else {
		__remove_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_uncharge_cache_page(page);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}
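/*
 * The refcount arithmetic in the two functions above, spelled out: a
 * clean, unmapped pagecache page held by a single caller has exactly two
 * references (the caller's and the page cache's), which is what
 * page_freeze_refs(page, 2) verifies before dropping the count to zero.
 * remove_mapping() then unfreezes to 1, leaving only the caller's
 * reference, so no extra atomic put is needed for the pagecache ref.
 */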
/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	int lru;
	int active = !!TestClearPageActive(page);
	int was_unevictable = PageUnevictable(page);

	VM_BUG_ON(PageLRU(page));

redo:
	ClearPageUnevictable(page);

	if (page_evictable(page, NULL)) {
		/*
		 * For evictable pages, we can use the cache.
		 * In event of a race, worst case is we end up with an
		 * unevictable page on [in]active list.
		 * We know how to handle that.
		 */
		lru = active + page_lru_base_type(page);
		lru_cache_add_lru(page, lru);
	} else {
		/*
		 * Put unevictable pages directly on zone's unevictable
		 * list.
		 */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
		/*
		 * When racing with an mlock clearing (page is
		 * unlocked), make sure that if the other thread does
		 * not observe our setting of PG_lru and fails
		 * isolation, we see PG_mlocked cleared below and move
		 * the page back to the evictable list.
		 *
		 * The other side is TestClearPageMlocked().
		 */
		smp_mb();
	}

	/*
	 * The page's status can change while we move it among the LRUs.
	 * If an evictable page ends up on the unevictable list, it will
	 * never be freed.  To avoid that, check again after adding it to
	 * the list.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
		if (!isolate_lru_page(page)) {
			put_page(page);
			goto redo;
		}
		/* This means someone else dropped this page from the LRU,
		 * so it will be freed or put back to the LRU again.  There
		 * is nothing to do here.
		 */
	}

	if (was_unevictable && lru != LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGRESCUED);
	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGCULLED);

	put_page(page);		/* drop ref from isolate */
}
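/*
 * Typical usage pattern, as a hedged sketch pieced together from
 * putback_lru_page() above and isolate_lru_page() further below (the
 * surrounding get_page/put_page pair stands in for whatever stable
 * reference the caller already holds):
 *
 *	get_page(page);
 *	if (!isolate_lru_page(page)) {
 *		... work on the page while it is off the LRU ...
 *		putback_lru_page(page);		(drops the isolate ref)
 *	}
 *	put_page(page);
 */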
enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/* Lumpy reclaim - ignore references */
	if (sc->lumpy_reclaim_mode)
		return PAGEREF_RECLAIM;

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageAnon(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page)
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}
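/*
 * The decision above, tabulated (a summary of the code, not additional
 * policy):
 *
 *	lumpy reclaim mode			-> PAGEREF_RECLAIM
 *	VM_LOCKED				-> PAGEREF_RECLAIM
 *	pte-referenced, anon			-> PAGEREF_ACTIVATE
 *	pte-referenced, file, PG_referenced	-> PAGEREF_ACTIVATE
 *	pte-referenced, file, first use		-> PAGEREF_KEEP
 *	no pte references, PG_referenced	-> PAGEREF_RECLAIM_CLEAN
 *	no references at all			-> PAGEREF_RECLAIM
 */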
static noinline_for_stack void free_page_list(struct list_head *free_pages)
{
	struct pagevec freed_pvec;
	struct page *page, *tmp;

	pagevec_init(&freed_pvec, 1);

	list_for_each_entry_safe(page, tmp, free_pages, lru) {
		list_del(&page->lru);
		if (!pagevec_add(&freed_pvec, page)) {
			__pagevec_free(&freed_pvec);
			pagevec_reinit(&freed_pvec);
		}
	}
	pagevec_free(&freed_pvec);
}
/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
					struct scan_control *sc,
					enum pageout_io sync_writeback)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned long nr_reclaimed = 0;

	cond_resched();

	while (!list_empty(page_list)) {
		enum page_references references;
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page, NULL)))
			goto cull_mlocked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			/*
			 * Synchronous reclaim is performed in two passes,
			 * first an asynchronous pass over the list to
			 * start parallel writeback, and a second synchronous
			 * pass to wait for the IO to complete.  Wait here
			 * for any page for which writeback has already
			 * started.
			 */
			if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
				wait_on_page_writeback(page);
			else
				goto keep_locked;
		}

		references = page_check_references(page, sc);
		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page))
				goto activate_locked;
			may_enter_fs = 1;
		}

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, TTU_UNMAP)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_MLOCK:
				goto cull_mlocked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sync_writeback)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.
				 * Go ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is
		 * actually clean (all its buffers are clean).  This happens
		 * if the buffers were written out directly, with submit_bh().
		 * ext3 will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache).  We can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references).
		 */
		__clear_page_locked(page);
free_it:
		nr_reclaimed++;

		/*
		 * Is there a need to periodically call free_page_list?  It
		 * would appear not, as the counts should be low.
		 */
		list_add(&page->lru, &free_pages);
		continue;

cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		putback_lru_page(page);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}

	free_page_list(&free_pages);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);
	return nr_reclaimed;
}
/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, int mode, int file)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/*
	 * When checking the active state, we need to be sure we are
	 * dealing with comparable boolean values.  Take the logical not
	 * of each.
	 */
	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
		return ret;

	if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
		return ret;

	/*
	 * When this function is being called for lumpy reclaim, we
	 * initially look into all LRU pages, active, inactive and
	 * unevictable; only give shrink_page_list evictable pages.
	 */
	if (PageUnevictable(page))
		return ret;

	ret = -EBUSY;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}
/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 * @order:	The caller's attempted allocation order
 * @mode:	One of the LRU isolation modes
 * @file:	True [1] if isolating file [!anon] pages
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned, int order, int mode, int file)
{
	unsigned long nr_taken = 0;
	unsigned long nr_lumpy_taken = 0;
	unsigned long nr_lumpy_dirty = 0;
	unsigned long nr_lumpy_failed = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		unsigned long pfn;
		unsigned long end_pfn;
		unsigned long page_pfn;
		int zone_id;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode, file)) {
		case 0:
			list_move(&page->lru, dst);
			mem_cgroup_del_lru(page);
			nr_taken++;
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			mem_cgroup_rotate_lru_list(page, page_lru(page));
			continue;

		default:
			BUG();
		}

		if (!order)
			continue;

		/*
		 * Attempt to take all pages in the order aligned region
		 * surrounding the tag page.  Only take those pages of
		 * the same active state as that tag page.  We may safely
		 * round the target page pfn down to the requested order
		 * as the mem_map is guaranteed valid out to MAX_ORDER;
		 * where a page is in a different zone we will detect it
		 * from its zone id and abort this block scan.
		 */
		zone_id = page_zone_id(page);
		page_pfn = page_to_pfn(page);
		pfn = page_pfn & ~((1 << order) - 1);
		end_pfn = pfn + (1 << order);
		for (; pfn < end_pfn; pfn++) {
			struct page *cursor_page;

			/* The target page is in the block, ignore it. */
			if (unlikely(pfn == page_pfn))
				continue;

			/* Avoid holes within the zone. */
			if (unlikely(!pfn_valid_within(pfn)))
				break;

			cursor_page = pfn_to_page(pfn);

			/* Check that we have not crossed a zone boundary. */
			if (unlikely(page_zone_id(cursor_page) != zone_id))
				continue;

			/*
			 * If we don't have enough swap space, reclaiming of
			 * anon pages which don't already have a swap slot is
			 * pointless.
			 */
			if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
			    !PageSwapCache(cursor_page))
				continue;

			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
				list_move(&cursor_page->lru, dst);
				mem_cgroup_del_lru(cursor_page);
				nr_taken++;
				nr_lumpy_taken++;
				if (PageDirty(cursor_page))
					nr_lumpy_dirty++;
				scan++;
			} else {
				if (mode == ISOLATE_BOTH &&
						page_count(cursor_page))
					nr_lumpy_failed++;
			}
		}
	}

	*scanned = scan;

	trace_mm_vmscan_lru_isolate(order,
			nr_to_scan, scan,
			nr_taken,
			nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
			mode);
	return nr_taken;
}
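/*
 * Example of the pfn rounding above (illustrative numbers): for
 * order = 2 and a tag page at pfn 1031, pfn = 1031 & ~3 = 1028 and
 * end_pfn = 1032, so the inner loop walks pfns 1028..1031, i.e. the
 * naturally aligned order-2 block containing the tag page.
 */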
static unsigned long isolate_pages_global(unsigned long nr,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					int active, int file)
{
	int lru = LRU_BASE;
	if (active)
		lru += LRU_ACTIVE;
	if (file)
		lru += LRU_FILE;
	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
								mode, file);
}

/*
 * clear_active_flags() is a helper for shrink_active_list(), clearing
 * any active bits from the pages in the list.
 */
static unsigned long clear_active_flags(struct list_head *page_list,
					unsigned int *count)
{
	int nr_active = 0;
	int lru;
	struct page *page;

	list_for_each_entry(page, page_list, lru) {
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			lru += LRU_ACTIVE;
			ClearPageActive(page);
			nr_active++;
		}
		if (count)
			count[lru]++;
	}

	return nr_active;
}
/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set. That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page) && get_page_unless_zero(page)) {
			int lru = page_lru(page);
			ret = 0;
			ClearPageLRU(page);

			del_page_from_lru_list(zone, page, lru);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}
/*
 * Are there way too many processes in the direct reclaim path already?
 */
static int too_many_isolated(struct zone *zone, int file,
		struct scan_control *sc)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!scanning_global_lru(sc))
		return 0;

	if (file) {
		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
	} else {
		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
	}

	return isolated > inactive;
}
/*
 * TODO: Try merging with migrations version of putback_lru_pages
 */
static noinline_for_stack void
putback_lru_pages(struct zone *zone, struct scan_control *sc,
				unsigned long nr_anon, unsigned long nr_file,
				struct list_head *page_list)
{
	struct page *page;
	struct pagevec pvec;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);

	pagevec_init(&pvec, 1);

	/*
	 * Put back any unfreeable pages.  The caller (shrink_inactive_list)
	 * has already disabled interrupts, hence the plain spin_lock here.
	 */
	spin_lock(&zone->lru_lock);
	while (!list_empty(page_list)) {
		int lru;
		page = lru_to_page(page_list);
		VM_BUG_ON(PageLRU(page));
		list_del(&page->lru);
		if (unlikely(!page_evictable(page, NULL))) {
			spin_unlock_irq(&zone->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&zone->lru_lock);
			continue;
		}
		SetPageLRU(page);
		lru = page_lru(page);
		add_page_to_lru_list(zone, page, lru);
		if (is_active_lru(lru)) {
			int file = is_file_lru(lru);
			reclaim_stat->recent_rotated[file]++;
		}
		if (!pagevec_add(&pvec, page)) {
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);

	spin_unlock_irq(&zone->lru_lock);
	pagevec_release(&pvec);
}
static noinline_for_stack void update_isolated_counts(struct zone *zone,
					struct scan_control *sc,
					unsigned long *nr_anon,
					unsigned long *nr_file,
					struct list_head *isolated_list)
{
	unsigned long nr_active;
	unsigned int count[NR_LRU_LISTS] = { 0, };
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);

	nr_active = clear_active_flags(isolated_list, count);
	__count_vm_events(PGDEACTIVATE, nr_active);

	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
			      -count[LRU_ACTIVE_FILE]);
	__mod_zone_page_state(zone, NR_INACTIVE_FILE,
			      -count[LRU_INACTIVE_FILE]);
	__mod_zone_page_state(zone, NR_ACTIVE_ANON,
			      -count[LRU_ACTIVE_ANON]);
	__mod_zone_page_state(zone, NR_INACTIVE_ANON,
			      -count[LRU_INACTIVE_ANON]);

	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);

	reclaim_stat->recent_scanned[0] += *nr_anon;
	reclaim_stat->recent_scanned[1] += *nr_file;
}
/*
 * Returns true if the caller should wait to clean dirty/writeback pages.
 *
 * If we are direct reclaiming for contiguous pages and we do not reclaim
 * everything in the list, try again and wait for writeback IO to complete.
 * This will stall high-order allocations noticeably.  Only do that when we
 * really need to free the pages under high memory pressure.
 */
static inline bool should_reclaim_stall(unsigned long nr_taken,
					unsigned long nr_freed,
					int priority,
					struct scan_control *sc)
{
	int lumpy_stall_priority;

	/* kswapd should not stall on sync IO */
	if (current_is_kswapd())
		return false;

	/* Only stall on lumpy reclaim */
	if (!sc->lumpy_reclaim_mode)
		return false;

	/* If we have reclaimed everything on the isolated list, no stall */
	if (nr_freed == nr_taken)
		return false;

	/*
	 * For high-order allocations, there are two stall thresholds.
	 * High-cost allocations stall immediately, whereas lower-order
	 * allocations such as stacks require the scanning priority to be
	 * much higher before stalling.
	 */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		lumpy_stall_priority = DEF_PRIORITY;
	else
		lumpy_stall_priority = DEF_PRIORITY / 3;

	return priority <= lumpy_stall_priority;
}
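/*
 * Concretely, using values from this kernel generation's headers
 * (DEF_PRIORITY = 12, PAGE_ALLOC_COSTLY_ORDER = 3): an order-4 lumpy
 * reclaim may stall at any priority, since scanning starts at priority
 * DEF_PRIORITY = 12 and only counts down, while an order-2 reclaim
 * stalls only once priority has fallen to DEF_PRIORITY / 3 = 4.
 */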
/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
			struct scan_control *sc, int priority, int file)
{
	LIST_HEAD(page_list);
	unsigned long nr_scanned;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_taken;
	unsigned long nr_active;
	unsigned long nr_anon;
	unsigned long nr_file;

	while (unlikely(too_many_isolated(zone, file, sc))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/* We are about to die and free our memory. Return now. */
		if (fatal_signal_pending(current))
			return SWAP_CLUSTER_MAX;
	}

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);

	if (scanning_global_lru(sc)) {
		nr_taken = isolate_pages_global(nr_to_scan,
			&page_list, &nr_scanned, sc->order,
			sc->lumpy_reclaim_mode ?
				ISOLATE_BOTH : ISOLATE_INACTIVE,
			zone, 0, file);
		zone->pages_scanned += nr_scanned;
		if (current_is_kswapd())
			__count_zone_vm_events(PGSCAN_KSWAPD, zone,
					       nr_scanned);
		else
			__count_zone_vm_events(PGSCAN_DIRECT, zone,
					       nr_scanned);
	} else {
		nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
			&page_list, &nr_scanned, sc->order,
			sc->lumpy_reclaim_mode ?
				ISOLATE_BOTH : ISOLATE_INACTIVE,
			zone, sc->mem_cgroup,
			0, file);
		/*
		 * mem_cgroup_isolate_pages() keeps track of
		 * scanned pages on its own.
		 */
	}

	if (nr_taken == 0) {
		spin_unlock_irq(&zone->lru_lock);
		return 0;
	}

	update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);

	spin_unlock_irq(&zone->lru_lock);

	nr_reclaimed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);

	/* Check if we should synchronously wait for writeback */
	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The attempt at page out may have made some
		 * of the pages active, mark them inactive again.
		 */
		nr_active = clear_active_flags(&page_list, NULL);
		count_vm_events(PGDEACTIVATE, nr_active);

		nr_reclaimed += shrink_page_list(&page_list, sc,
							PAGEOUT_IO_SYNC);
	}

	local_irq_disable();
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);

	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
	return nr_reclaimed;
}
/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void move_active_pages_to_lru(struct zone *zone,
				     struct list_head *list,
				     enum lru_list lru)
{
	unsigned long pgmoved = 0;
	struct pagevec pvec;
	struct page *page;

	pagevec_init(&pvec, 1);

	while (!list_empty(list)) {
		page = lru_to_page(list);

		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);

		list_move(&page->lru, &zone->lru[lru].list);
		mem_cgroup_add_lru_list(page, lru);
		pgmoved++;

		if (!pagevec_add(&pvec, page) || list_empty(list)) {
			spin_unlock_irq(&zone->lru_lock);
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
	if (!is_active_lru(lru))
		__count_vm_events(PGDEACTIVATE, pgmoved);
}

static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                               struct scan_control *sc, int priority, int file)
{
        unsigned long nr_taken;
        unsigned long pgscanned;
        unsigned long vm_flags;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
        LIST_HEAD(l_active);
        LIST_HEAD(l_inactive);
        struct page *page;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
        unsigned long nr_rotated = 0;

        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
        if (scanning_global_lru(sc)) {
                nr_taken = isolate_pages_global(nr_pages, &l_hold,
                                                &pgscanned, sc->order,
                                                ISOLATE_ACTIVE, zone,
                                                1, file);
                zone->pages_scanned += pgscanned;
        } else {
                nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
                                                &pgscanned, sc->order,
                                                ISOLATE_ACTIVE, zone,
                                                sc->mem_cgroup, 1, file);
                /*
                 * mem_cgroup_isolate_pages() keeps track of
                 * scanned pages on its own.
                 */
        }

        reclaim_stat->recent_scanned[file] += nr_taken;

        __count_zone_vm_events(PGREFILL, zone, pgscanned);
        if (file)
                __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
        else
                __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
        spin_unlock_irq(&zone->lru_lock);

        while (!list_empty(&l_hold)) {
                cond_resched();
                page = lru_to_page(&l_hold);
                list_del(&page->lru);

                if (unlikely(!page_evictable(page, NULL))) {
                        putback_lru_page(page);
                        continue;
                }

                if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
                        nr_rotated++;
                        /*
                         * Identify referenced, file-backed active pages and
                         * give them one more trip around the active list, so
                         * that executable code gets a better chance to stay
                         * in memory under moderate memory pressure.  Anon
                         * pages are not likely to be evicted by use-once
                         * streaming IO, plus the JVM can create lots of anon
                         * VM_EXEC pages, so we ignore them here.
                         */
                        if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
                }

                ClearPageActive(page);  /* we are de-activating */
                list_add(&page->lru, &l_inactive);
        }

        /*
         * Move pages back to the lru list.
         */
        spin_lock_irq(&zone->lru_lock);
        /*
         * Count referenced pages from currently used mappings as rotated,
         * even though only some of them are actually re-activated.  This
         * helps balance scan pressure between file and anonymous pages in
         * get_scan_count().
         */
        reclaim_stat->recent_rotated[file] += nr_rotated;

        move_active_pages_to_lru(zone, &l_active,
                                 LRU_ACTIVE + file * LRU_FILE);
        move_active_pages_to_lru(zone, &l_inactive,
                                 LRU_BASE + file * LRU_FILE);
        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
        spin_unlock_irq(&zone->lru_lock);
}
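
/*
 * Illustrative note (editor's addition, not in the original source): the
 * LRU index arithmetic above relies on the enum layout
 *      LRU_BASE == 0 (LRU_INACTIVE_ANON), LRU_ACTIVE == 1, LRU_FILE == 2,
 * so file == 1 yields LRU_ACTIVE + 1 * LRU_FILE == LRU_ACTIVE_FILE (3)
 * and LRU_BASE + 1 * LRU_FILE == LRU_INACTIVE_FILE (2), while file == 0
 * selects the two anon lists.
 */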

static int inactive_anon_is_low_global(struct zone *zone)
{
        unsigned long active, inactive;

        active = zone_page_state(zone, NR_ACTIVE_ANON);
        inactive = zone_page_state(zone, NR_INACTIVE_ANON);

        if (inactive * zone->inactive_ratio < active)
                return 1;

        return 0;
}
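
/*
 * Worked example (editor's addition, not in the original source):
 * zone->inactive_ratio is derived from the zone size at boot.  On a zone
 * with inactive_ratio == 3, 100MB of inactive anon against 400MB of
 * active anon gives 100 * 3 < 400, so the inactive list is considered
 * low and some active anon pages will be deactivated.
 */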

/**
 * inactive_anon_is_low - check if anonymous pages need to be deactivated
 * @zone: zone to check
 * @sc:   scan control of this context
 *
 * Returns true if the zone does not have enough inactive anon pages,
 * meaning some active anon pages need to be deactivated.
 */
static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
{
        int low;

        if (scanning_global_lru(sc))
                low = inactive_anon_is_low_global(zone);
        else
                low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
        return low;
}

static int inactive_file_is_low_global(struct zone *zone)
{
        unsigned long active, inactive;

        active = zone_page_state(zone, NR_ACTIVE_FILE);
        inactive = zone_page_state(zone, NR_INACTIVE_FILE);

        return (active > inactive);
}

/**
 * inactive_file_is_low - check if file pages need to be deactivated
 * @zone: zone to check
 * @sc:   scan control of this context
 *
 * When the system is doing streaming IO, memory pressure here
 * ensures that active file pages get deactivated, until more
 * than half of the file pages are on the inactive list.
 *
 * Once we get to that situation, protect the system's working
 * set from being evicted by disabling active file page aging.
 *
 * This uses a different ratio than the anonymous pages, because
 * the page cache uses a use-once replacement algorithm.
 */
static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
{
        int low;

        if (scanning_global_lru(sc))
                low = inactive_file_is_low_global(zone);
        else
                low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
        return low;
}
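
/*
 * Illustrative contrast (editor's addition, not in the original source):
 * file pages use a fixed 1:1 active/inactive target (deactivate while
 * active > inactive), whereas anon pages use the size-dependent
 * zone->inactive_ratio above, which keeps the anon inactive list
 * proportionally smaller on large zones.
 */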

static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
                                int file)
{
        if (file)
                return inactive_file_is_low(zone, sc);
        else
                return inactive_anon_is_low(zone, sc);
}

static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
        struct zone *zone, struct scan_control *sc, int priority)
{
        int file = is_file_lru(lru);

        if (is_active_lru(lru)) {
                if (inactive_list_is_low(zone, sc, file))
                        shrink_active_list(nr_to_scan, zone, sc, priority, file);
                return 0;
        }

        return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
}

/*
 * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
 * until we have collected SWAP_CLUSTER_MAX pages to scan.
 */
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
                                       unsigned long *nr_saved_scan)
{
        unsigned long nr;

        *nr_saved_scan += nr_to_scan;
        nr = *nr_saved_scan;

        if (nr >= SWAP_CLUSTER_MAX)
                *nr_saved_scan = 0;
        else
                nr = 0;

        return nr;
}
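
/*
 * Worked example (editor's addition, not in the original source): with
 * SWAP_CLUSTER_MAX == 32, four successive calls with nr_to_scan == 10
 * return 0, 0, 0 and then 40.  The deferred work is carried over in
 * *nr_saved_scan and released in one batch once it reaches the cluster
 * size, so tiny scan targets are not scanned one page at a time.
 */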

/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative value of each set of LRU lists is determined
 * by looking at the fraction of the scanned pages that we rotated back
 * onto the active list instead of evicting.
 *
 * nr[0] = anon pages to scan; nr[1] = file pages to scan
 */
static void get_scan_count(struct zone *zone, struct scan_control *sc,
                           unsigned long *nr, int priority)
{
        unsigned long anon, file, free;
        unsigned long anon_prio, file_prio;
        unsigned long ap, fp;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
        u64 fraction[2], denominator;
        enum lru_list l;
        int noswap = 0;

        /* If we have no swap space, do not bother scanning anon pages. */
        if (!sc->may_swap || (nr_swap_pages <= 0)) {
                noswap = 1;
                fraction[0] = 0;
                fraction[1] = 1;
                denominator = 1;
                goto out;
        }

        anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
        file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);

        if (scanning_global_lru(sc)) {
                free = zone_page_state(zone, NR_FREE_PAGES);
                /*
                 * If we have very few page cache pages,
                 * force-scan anon pages.
                 */
                if (unlikely(file + free <= high_wmark_pages(zone))) {
                        fraction[0] = 1;
                        fraction[1] = 0;
                        denominator = 1;
                        goto out;
                }
        }

        /*
         * With swappiness at 100, anonymous and file have the same priority.
         * This scanning priority is essentially the inverse of IO cost.
         */
        anon_prio = sc->swappiness;
        file_prio = 200 - sc->swappiness;

        /*
         * OK, so we have swap space and a fair amount of page cache
         * pages.  We use the recently rotated / recently scanned
         * ratios to determine how valuable each cache is.
         *
         * Because workloads change over time (and to avoid overflow)
         * we keep these statistics as a floating average, which ends
         * up weighing recent references more than old ones.
         *
         * anon in [0], file in [1]
         */
        spin_lock_irq(&zone->lru_lock);
        if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
                reclaim_stat->recent_scanned[0] /= 2;
                reclaim_stat->recent_rotated[0] /= 2;
        }

        if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
                reclaim_stat->recent_scanned[1] /= 2;
                reclaim_stat->recent_rotated[1] /= 2;
        }

        /*
         * The amount of pressure on anon vs file pages is inversely
         * proportional to the fraction of recently scanned pages on
         * each list that were recently referenced and in active use.
         */
        ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
        ap /= reclaim_stat->recent_rotated[0] + 1;

        fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
        fp /= reclaim_stat->recent_rotated[1] + 1;
        spin_unlock_irq(&zone->lru_lock);

        fraction[0] = ap;
        fraction[1] = fp;
        denominator = ap + fp + 1;
out:
        for_each_evictable_lru(l) {
                int file = is_file_lru(l);
                unsigned long scan;

                scan = zone_nr_lru_pages(zone, sc, l);
                if (priority || noswap) {
                        scan >>= priority;
                        scan = div64_u64(scan * fraction[file], denominator);
                }
                nr[l] = nr_scan_try_batch(scan,
                                          &reclaim_stat->nr_saved_scan[l]);
        }
}
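
/*
 * Worked example (editor's addition, not in the original source): with
 * swappiness == 60 we get anon_prio == 60 and file_prio == 140.  Suppose
 * recent_scanned/recent_rotated are 1000/900 for anon (nearly everything
 * scanned was in active use) and 1000/100 for file.  Then
 *      ap = 61 * 1001 / 901  ~= 67
 *      fp = 141 * 1001 / 101 ~= 1397
 * so roughly 67 / (67 + 1397 + 1) ~= 5% of the scan pressure goes to the
 * anon lists and ~95% to the file lists.
 */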

static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
{
        /*
         * If we need a large contiguous chunk of memory, or have
         * trouble getting a small set of contiguous pages, we
         * will reclaim both active and inactive pages.
         */
        if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
                sc->lumpy_reclaim_mode = 1;
        else if (sc->order && priority < DEF_PRIORITY - 2)
                sc->lumpy_reclaim_mode = 1;
        else
                sc->lumpy_reclaim_mode = 0;
}
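
/*
 * Illustrative note (editor's addition, not in the original source):
 * PAGE_ALLOC_COSTLY_ORDER is 3 and DEF_PRIORITY is 12, so an order-4
 * request (64KB with 4KB pages) goes lumpy immediately, while an order-2
 * request only goes lumpy once reclaim has already dropped below
 * priority 10.
 */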

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void shrink_zone(int priority, struct zone *zone,
                        struct scan_control *sc)
{
        unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
        enum lru_list l;
        unsigned long nr_reclaimed = sc->nr_reclaimed;
        unsigned long nr_to_reclaim = sc->nr_to_reclaim;

        get_scan_count(zone, sc, nr, priority);

        set_lumpy_reclaim_mode(priority, sc);

        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
                for_each_evictable_lru(l) {
                        if (nr[l]) {
                                nr_to_scan = min_t(unsigned long,
                                                   nr[l], SWAP_CLUSTER_MAX);
                                nr[l] -= nr_to_scan;

                                nr_reclaimed += shrink_list(l, nr_to_scan,
                                                            zone, sc, priority);
                        }
                }
                /*
                 * On large memory systems, scan >> priority can become
                 * really large.  This is fine for the starting priority;
                 * we want to put equal scanning pressure on each zone.
                 * However, if the VM has a harder time of freeing pages,
                 * with multiple processes reclaiming pages, the total
                 * freeing target can get unreasonably large.
                 */
                if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
                        break;
        }

        sc->nr_reclaimed = nr_reclaimed;

        /*
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
        if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
                shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);

        throttle_vm_writeout(sc->gfp_mask);
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
 * Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation, or
 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
 *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
 *    zone defense algorithm.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan, then give up on it.
 */
static bool shrink_zones(int priority, struct zonelist *zonelist,
                         struct scan_control *sc)
{
        struct zoneref *z;
        struct zone *zone;
        bool all_unreclaimable = true;

        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                gfp_zone(sc->gfp_mask), sc->nodemask) {
                if (!populated_zone(zone))
                        continue;
                /*
                 * Memory controller reclaim has only a small influence on
                 * the global LRU, so the checks below apply only to
                 * global reclaim.
                 */
                if (scanning_global_lru(sc)) {
                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                continue;
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
                }

                shrink_zone(priority, zone, sc);
                all_unreclaimable = false;
        }
        return all_unreclaimable;
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick the writeback threads and take explicit
 * naps in the hope that some of these pages can be written.  But if the
 * allocating task holds filesystem locks which prevent writeout this might not
 * work, and the allocation attempt will fail.
 *
 * returns:     0, if no pages reclaimed
 *              else, the number of pages reclaimed
 */
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                                          struct scan_control *sc)
{
        int priority;
        bool all_unreclaimable;
        unsigned long total_scanned = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct zoneref *z;
        struct zone *zone;
        unsigned long writeback_threshold;

        get_mems_allowed();
        delayacct_freepages_start();

        if (scanning_global_lru(sc))
                count_vm_event(ALLOCSTALL);

        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
                all_unreclaimable = shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
                 */
                if (scanning_global_lru(sc)) {
                        unsigned long lru_pages = 0;
                        for_each_zone_zonelist(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask)) {
                                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                        continue;

                                lru_pages += zone_reclaimable_pages(zone);
                        }

                        shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
                        if (reclaim_state) {
                                sc->nr_reclaimed += reclaim_state->reclaimed_slab;
                                reclaim_state->reclaimed_slab = 0;
                        }
                }
                total_scanned += sc->nr_scanned;
                if (sc->nr_reclaimed >= sc->nr_to_reclaim)
                        goto out;

                /*
                 * Try to write back as many pages as we just scanned.  This
                 * tends to cause slow streaming writers to write data to the
                 * disk smoothly, at the dirtying rate, which is nice.  But
                 * that's undesirable in laptop mode, where we *want* lumpy
                 * writeout.  So in laptop mode, write out the whole world.
                 */
                writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
                if (total_scanned > writeback_threshold) {
                        wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
                        sc->may_writepage = 1;
                }

                /* Take a nap, wait for some writeback to complete */
                if (!sc->hibernation_mode && sc->nr_scanned &&
                    priority < DEF_PRIORITY - 2)
                        congestion_wait(BLK_RW_ASYNC, HZ/10);
        }

out:
        /*
         * Now that we've scanned all the zones at this priority level, note
         * that level within the zone so that the next thread which performs
         * scanning of this zone will immediately start out at this priority
         * level.  This affects only the decision whether or not to bring
         * mapped pages onto the inactive list.
         */
        if (priority < 0)
                priority = 0;

        delayacct_freepages_end();
        put_mems_allowed();

        if (sc->nr_reclaimed)
                return sc->nr_reclaimed;

        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (scanning_global_lru(sc) && !all_unreclaimable)
                return 1;

        return 0;
}
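
/*
 * Worked example (editor's addition, not in the original source): for
 * direct reclaim below, nr_to_reclaim is SWAP_CLUSTER_MAX (32), so
 * writeback_threshold is 48; once more than 48 pages have been scanned
 * in total, the flusher threads are woken and the scan is also allowed
 * to issue writepage itself.
 */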

unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                gfp_t gfp_mask, nodemask_t *nodemask)
{
        unsigned long nr_reclaimed;
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .may_unmap = 1,
                .may_swap = 1,
                .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
                .nodemask = nodemask,
        };

        trace_mm_vmscan_direct_reclaim_begin(order,
                                sc.may_writepage,
                                gfp_mask);

        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

        trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);

        return nr_reclaimed;
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR

unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                          gfp_t gfp_mask, bool noswap,
                                          unsigned int swappiness,
                                          struct zone *zone, int nid)
{
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
                .may_swap = !noswap,
                .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem,
        };
        nodemask_t nm = nodemask_of_node(nid);

        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
        sc.nodemask = &nm;
        sc.nr_reclaimed = 0;
        sc.nr_scanned = 0;

        trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
                                                      sc.may_writepage,
                                                      sc.gfp_mask);

        /*
         * NOTE: Although we can get the priority field, using it
         * here is not a good idea, since it limits the pages we can scan.
         * If we don't reclaim here, the shrink_zone from balance_pgdat
         * will pick up pages from other mem cgroups as well.  We hack
         * the priority and make it zero.
         */
        shrink_zone(0, zone, &sc);

        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);

        return sc.nr_reclaimed;
}

unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                           gfp_t gfp_mask,
                                           bool noswap,
                                           unsigned int swappiness)
{
        struct zonelist *zonelist;
        unsigned long nr_reclaimed;
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
                .may_swap = !noswap,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem_cont,
                .nodemask = NULL,       /* we don't care about placement */
        };

        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
        zonelist = NODE_DATA(numa_node_id())->node_zonelists;

        trace_mm_vmscan_memcg_reclaim_begin(0,
                                            sc.may_writepage,
                                            sc.gfp_mask);

        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);

        return nr_reclaimed;
}
#endif

/* is kswapd sleeping prematurely? */
static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
{
        int i;

        /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
        if (remaining)
                return 1;

        /* If after HZ/10, a zone is below the high mark, it's premature */
        for (i = 0; i < pgdat->nr_zones; i++) {
                struct zone *zone = pgdat->node_zones + i;

                if (!populated_zone(zone))
                        continue;

                if (zone->all_unreclaimable)
                        continue;

                if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
                                       0, 0))
                        return 1;
        }

        return 0;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at high_wmark_pages(zone).
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone
 * as dead and from now on, only perform a short scan.  Basically we're
 * polling the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and
 * the lower zones regardless of the number of free pages in the lower zones.
 * This interoperates with the page allocator fallback scheme to ensure that
 * aging of pages is balanced across the zones.
 */
static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
{
        int all_zones_ok;
        int priority;
        int i;
        unsigned long total_scanned;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_unmap = 1,
                .may_swap = 1,
                /*
                 * kswapd doesn't want to be bailed out while reclaiming,
                 * because we want to put equal scanning pressure on
                 * each zone.
                 */
                .nr_to_reclaim = ULONG_MAX,
                .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
        };
loop_again:
        total_scanned = 0;
        sc.nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
        count_vm_event(PAGEOUTRUN);

        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
                unsigned long lru_pages = 0;
                int has_under_min_watermark_zone = 0;

                /* The swap token gets in the way of swapout... */
                if (!priority)
                        disable_swap_token();

                all_zones_ok = 1;

                /*
                 * Scan in the highmem->dma direction for the highest
                 * zone which needs scanning
                 */
                for (i = pgdat->nr_zones - 1; i >= 0; i--) {
                        struct zone *zone = pgdat->node_zones + i;

                        if (!populated_zone(zone))
                                continue;

                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;

                        /*
                         * Do some background aging of the anon list, to give
                         * pages a chance to be referenced before reclaiming.
                         */
                        if (inactive_anon_is_low(zone, &sc))
                                shrink_active_list(SWAP_CLUSTER_MAX, zone,
                                                   &sc, priority, 0);

                        if (!zone_watermark_ok(zone, order,
                                        high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
                                break;
                        }
                }
                if (i < 0)
                        goto out;

                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;

                        lru_pages += zone_reclaimable_pages(zone);
                }

                /*
                 * Now scan the zone in the dma->highmem direction, stopping
                 * at the last zone which needs scanning.
                 *
                 * We do this because the page allocator works in the opposite
                 * direction.  This prevents the page allocator from allocating
                 * pages behind kswapd's direction of progress, which would
                 * cause too much scanning of the lower zones.
                 */
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab;
                        int nid, zid;

                        if (!populated_zone(zone))
                                continue;

                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;

                        sc.nr_scanned = 0;

                        nid = pgdat->node_id;
                        zid = zone_idx(zone);
                        /*
                         * Call soft limit reclaim before calling shrink_zone.
                         * For now we ignore the return value
                         */
                        mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask,
                                                      nid, zid);
                        /*
                         * We put equal pressure on every zone, unless one
                         * zone has way too many pages free already.
                         */
                        if (!zone_watermark_ok(zone, order,
                                        8*high_wmark_pages(zone), end_zone, 0))
                                shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
                                              lru_pages);
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
                        if (zone->all_unreclaimable)
                                continue;
                        if (nr_slab == 0 &&
                            zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
                                zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
                         * even in laptop mode
                         */
                        if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
                            total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                sc.may_writepage = 1;

                        if (!zone_watermark_ok(zone, order,
                                        high_wmark_pages(zone), end_zone, 0)) {
                                all_zones_ok = 0;
                                /*
                                 * We are still under the min watermark.  This
                                 * means that we have a GFP_ATOMIC allocation
                                 * failure risk.  Hurry up!
                                 */
                                if (!zone_watermark_ok(zone, order,
                                            min_wmark_pages(zone), end_zone, 0))
                                        has_under_min_watermark_zone = 1;
                        }

                }
                if (all_zones_ok)
                        break;          /* kswapd: all done */
                /*
                 * OK, kswapd is getting into trouble.  Take a nap, then take
                 * another pass across the zones.
                 */
                if (total_scanned && (priority < DEF_PRIORITY - 2)) {
                        if (has_under_min_watermark_zone)
                                count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
                        else
                                congestion_wait(BLK_RW_ASYNC, HZ/10);
                }

                /*
                 * We do this so kswapd doesn't build up large priorities for
                 * example when it is freeing in parallel with allocators.  It
                 * matches the direct reclaim path behaviour in terms of impact
                 * on zone->*_priority.
                 */
                if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
                        break;
        }
out:
        if (!all_zones_ok) {
                cond_resched();

                try_to_freeze();

                /*
                 * Fragmentation may mean that the system cannot be
                 * rebalanced for high-order allocations in all zones.
                 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
                 * it means the zones have been fully scanned and are still
                 * not balanced.  For high-order allocations, there is
                 * little point trying all over again as kswapd may
                 * loop forever.
                 *
                 * Instead, recheck all watermarks at order-0 as they
                 * are the most important.  If watermarks are ok, kswapd will
                 * go back to sleep.  High-order users can still perform
                 * direct reclaim if they wish.
                 */
                if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
                        order = sc.order = 0;

                goto loop_again;
        }

        return sc.nr_reclaimed;
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up.  This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
        unsigned long order;
        pg_data_t *pgdat = (pg_data_t *)p;
        struct task_struct *tsk = current;
        DEFINE_WAIT(wait);
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

        lockdep_set_current_reclaim_state(GFP_KERNEL);

        if (!cpumask_empty(cpumask))
                set_cpus_allowed_ptr(tsk, cpumask);
        current->reclaim_state = &reclaim_state;

        /*
         * Tell the memory management that we're a "memory allocator",
         * and that if we need more memory we should get access to it
         * regardless (see "__alloc_pages()").  "kswapd" should
         * never get caught in the normal page freeing logic.
         *
         * (Kswapd normally doesn't need memory anyway, but sometimes
         * you need a small amount of memory in order to be able to
         * page out something else, and this flag essentially protects
         * us from recursively trying to free more memory as we're
         * trying to free the first piece of memory in the first place).
         */
        tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
        set_freezable();

        order = 0;
        for ( ; ; ) {
                unsigned long new_order;
                int ret;

                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
                new_order = pgdat->kswapd_max_order;
                pgdat->kswapd_max_order = 0;
                if (order < new_order) {
                        /*
                         * Don't sleep if someone wants a larger 'order'
                         * allocation
                         */
                        order = new_order;
                } else {
                        if (!freezing(current) && !kthread_should_stop()) {
                                long remaining = 0;

                                /* Try to sleep for a short interval */
                                if (!sleeping_prematurely(pgdat, order, remaining)) {
                                        remaining = schedule_timeout(HZ/10);
                                        finish_wait(&pgdat->kswapd_wait, &wait);
                                        prepare_to_wait(&pgdat->kswapd_wait,
                                                &wait, TASK_INTERRUPTIBLE);
                                }

                                /*
                                 * After a short sleep, check if it was a
                                 * premature sleep.  If not, then go fully
                                 * to sleep until explicitly woken up
                                 */
                                if (!sleeping_prematurely(pgdat, order, remaining)) {
                                        trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
                                        schedule();
                                } else {
                                        if (remaining)
                                                count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
                                        else
                                                count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
                                }
                        }

                        order = pgdat->kswapd_max_order;
                }
                finish_wait(&pgdat->kswapd_wait, &wait);

                ret = try_to_freeze();
                if (kthread_should_stop())
                        break;

                /*
                 * We can speed up thawing tasks if we don't call balance_pgdat
                 * after returning from the refrigerator
                 */
                if (!ret) {
                        trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
                        balance_pgdat(pgdat, order);
                }
        }
        return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
        pg_data_t *pgdat;

        if (!populated_zone(zone))
                return;

        pgdat = zone->zone_pgdat;
        if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
        trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                return;
        if (!waitqueue_active(&pgdat->kswapd_wait))
                return;
        wake_up_interruptible(&pgdat->kswapd_wait);
}
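
/*
 * Illustrative note (editor's addition, not in the original source): the
 * early return above means kswapd is only woken once free pages in the
 * zone fall below low_wmark_pages(zone); balance_pgdat() then works the
 * node's zones back up to the high watermark, giving hysteresis between
 * the wakeup and sleep thresholds.
 */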

/*
 * The reclaimable count is mostly accurate.  The less reclaimable pages
 * may be:
 * - mlocked pages, which will be moved to the unevictable list when
 *   encountered
 * - mapped pages, which may require several passes to be reclaimed
 * - dirty pages, which are not "instantly" reclaimable
 */
unsigned long global_reclaimable_pages(void)
{
        /* unsigned long, not int: an int can truncate on huge systems */
        unsigned long nr;

        nr = global_page_state(NR_ACTIVE_FILE) +
             global_page_state(NR_INACTIVE_FILE);

        if (nr_swap_pages > 0)
                nr += global_page_state(NR_ACTIVE_ANON) +
                      global_page_state(NR_INACTIVE_ANON);

        return nr;
}

unsigned long zone_reclaimable_pages(struct zone *zone)
{
        unsigned long nr;

        nr = zone_page_state(zone, NR_ACTIVE_FILE) +
             zone_page_state(zone, NR_INACTIVE_FILE);

        if (nr_swap_pages > 0)
                nr += zone_page_state(zone, NR_ACTIVE_ANON) +
                      zone_page_state(zone, NR_INACTIVE_ANON);

        return nr;
}
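
/*
 * Illustrative note (editor's addition, not in the original source): with
 * no swap configured (nr_swap_pages == 0), anon pages cannot be reclaimed
 * at all, so both helpers above deliberately count only the file LRU
 * pages.
 */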

#ifdef CONFIG_HIBERNATION
/*
 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number
 * of freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
        struct reclaim_state reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_HIGHUSER_MOVABLE,
                .may_swap = 1,
                .may_unmap = 1,
                .may_writepage = 1,
                .nr_to_reclaim = nr_to_reclaim,
                .hibernation_mode = 1,
                .swappiness = vm_swappiness,
                .order = 0,
        };
        struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
        struct task_struct *p = current;
        unsigned long nr_reclaimed;

        p->flags |= PF_MEMALLOC;
        lockdep_set_current_reclaim_state(sc.gfp_mask);
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;

        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

        p->reclaim_state = NULL;
        lockdep_clear_current_reclaim_state();
        p->flags &= ~PF_MEMALLOC;

        return nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */

/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
{
        int nid;

        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
                for_each_node_state(nid, N_HIGH_MEMORY) {
                        pg_data_t *pgdat = NODE_DATA(nid);
                        const struct cpumask *mask;

                        mask = cpumask_of_node(pgdat->node_id);

                        if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                /* One of our CPUs online: restore mask */
                                set_cpus_allowed_ptr(pgdat->kswapd, mask);
                }
        }
        return NOTIFY_OK;
}

/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
        pg_data_t *pgdat = NODE_DATA(nid);
        int ret = 0;

        if (pgdat->kswapd)
                return 0;

        pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
        if (IS_ERR(pgdat->kswapd)) {
                /* failure at boot is fatal */
                BUG_ON(system_state == SYSTEM_BOOTING);
                printk("Failed to start kswapd on node %d\n", nid);
                ret = -1;
        }
        return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined.
 */
void kswapd_stop(int nid)
{
        struct task_struct *kswapd = NODE_DATA(nid)->kswapd;

        if (kswapd)
                kthread_stop(kswapd);
}

static int __init kswapd_init(void)
{
        int nid;

        swap_setup();
        for_each_node_state(nid, N_HIGH_MEMORY)
                kswapd_run(nid);
        hotcpu_notifier(cpu_callback, 0);
        return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)     /* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)     /* Swap pages out during reclaim */

/*
 * Priority for ZONE_RECLAIM.  This determines the fraction of pages
 * of a node considered for each zone_reclaim.  4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4
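
/*
 * Illustrative note (editor's addition, not in the original source): the
 * scan target in get_scan_count() is right-shifted by the priority, so a
 * starting priority of 4 looks at zone_size >> 4 == 1/16th of the zone's
 * pages, and each retry in __zone_reclaim() halves the divisor
 * (1/8th, 1/4th, ...) until enough pages are freed.
 */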

/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
        unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
        unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
                zone_page_state(zone, NR_ACTIVE_FILE);

        /*
         * It's possible for there to be more file mapped pages than
         * accounted for by the pages on the file LRU lists because
         * tmpfs pages accounted for as ANON can also be FILE_MAPPED
         */
        return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}
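
/*
 * Worked example (editor's addition, not in the original source): with
 * 10000 file LRU pages of which 3000 are FILE_MAPPED, 7000 pages count
 * as unmapped file cache.  If mapped tmpfs pages push FILE_MAPPED above
 * the file LRU count, the result clamps to 0 rather than underflowing.
 */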

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static long zone_pagecache_reclaimable(struct zone *zone)
{
        long nr_pagecache_reclaimable;
        long delta = 0;

        /*
         * If RECLAIM_SWAP is set, then all file pages are considered
         * potentially reclaimable.  Otherwise, we have to worry about
         * pages like swapcache and zone_unmapped_file_pages() provides
         * a better estimate
         */
        if (zone_reclaim_mode & RECLAIM_SWAP)
                nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
        else
                nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);

        /* If we can't clean pages, remove dirty pages from consideration */
        if (!(zone_reclaim_mode & RECLAIM_WRITE))
                delta += zone_page_state(zone, NR_FILE_DIRTY);

        /* Watch for any possible underflows due to delta */
        if (unlikely(delta > nr_pagecache_reclaimable))
                delta = nr_pagecache_reclaimable;

        return nr_pagecache_reclaimable - delta;
}
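
/*
 * Worked example (editor's addition, not in the original source): with
 * zone_reclaim_mode == RECLAIM_ZONE (neither RECLAIM_WRITE nor
 * RECLAIM_SWAP set), 7000 unmapped file pages of which 2000 are dirty
 * leave 5000 reclaimable: the dirty pages are subtracted because this
 * mode is not allowed to write them out.
 */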

/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
        /* Minimum pages needed in order to stay on node */
        const unsigned long nr_pages = 1 << order;
        struct task_struct *p = current;
        struct reclaim_state reclaim_state;
        int priority;
        struct scan_control sc = {
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
                .may_swap = 1,
                .nr_to_reclaim = max_t(unsigned long, nr_pages,
                                       SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
                .swappiness = vm_swappiness,
                .order = order,
        };
        unsigned long nr_slab_pages0, nr_slab_pages1;

        cond_resched();
        /*
         * We need to be able to allocate from the reserves for RECLAIM_SWAP
         * and we also need to be able to write out pages for RECLAIM_WRITE
         * and RECLAIM_SWAP.
         */
        p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
        lockdep_set_current_reclaim_state(gfp_mask);
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;

        if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
                /*
                 * Free memory by calling shrink zone with increasing
                 * priorities until we have enough memory freed.
                 */
                priority = ZONE_RECLAIM_PRIORITY;
                do {
                        shrink_zone(priority, zone, &sc);
                        priority--;
                } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
        }

        nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
        if (nr_slab_pages0 > zone->min_slab_pages) {
                /*
                 * shrink_slab() does not currently allow us to determine how
                 * many pages were freed in this zone.  So we take the current
                 * number of slab pages and shake the slab until it is reduced
                 * by the same nr_pages that we used for reclaiming unmapped
                 * pages.
                 *
                 * Note that shrink_slab will free memory on all zones and may
                 * take a long time.
                 */
                for (;;) {
                        unsigned long lru_pages = zone_reclaimable_pages(zone);

                        /* No reclaimable slab or very low memory pressure */
                        if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages))
                                break;

                        /* Freed enough memory */
                        nr_slab_pages1 = zone_page_state(zone,
                                                         NR_SLAB_RECLAIMABLE);
                        if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
                                break;
                }

                /*
                 * Update nr_reclaimed by the number of slab pages we
                 * reclaimed from this zone.
                 */
                nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
                if (nr_slab_pages1 < nr_slab_pages0)
                        sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
        }

        p->reclaim_state = NULL;
        current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
        lockdep_clear_current_reclaim_state();
        return sc.nr_reclaimed >= nr_pages;
}

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
        int node_id;
        int ret;

        /*
         * Zone reclaim reclaims unmapped file backed pages and
         * slab pages if we are over the defined limits.
         *
         * A small portion of unmapped file backed pages is needed for
         * file I/O otherwise pages read by file I/O will be immediately
         * thrown out if the zone is overallocated.  So we do not reclaim
         * if less than a specified percentage of the zone is used by
         * unmapped file backed pages.
         */
        if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
            zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
                return ZONE_RECLAIM_FULL;

        if (zone->all_unreclaimable)
                return ZONE_RECLAIM_FULL;

        /*
         * Do not scan if the allocation should not be delayed.
         */
        if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
                return ZONE_RECLAIM_NOSCAN;

        /*
         * Only run zone reclaim on the local zone or on zones that do not
         * have associated processors.  This will favor the local processor
         * over remote processors and spread off node memory allocations
         * as wide as possible.
         */
        node_id = zone_to_nid(zone);
        if (node_state(node_id, N_CPU) && node_id != numa_node_id())
                return ZONE_RECLAIM_NOSCAN;

        if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
                return ZONE_RECLAIM_NOSCAN;

        ret = __zone_reclaim(zone, gfp_mask, order);
        zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

        if (!ret)
                count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

        return ret;
}
#endif

/*
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 * @vma: the VMA in which the page is or will be mapped, may be NULL
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.  The vma argument is !NULL when called from the
 * fault path to determine how to instantiate a new page.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
int page_evictable(struct page *page, struct vm_area_struct *vma)
{
        if (mapping_unevictable(page_mapping(page)))
                return 0;

        if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
                return 0;

        return 1;
}

/**
 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
 * @page: page to check evictability and move to appropriate lru list
 * @zone: zone page is in
 *
 * Checks a page for evictability and moves the page to the appropriate
 * zone lru list.
 *
 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
 * have PageUnevictable set.
 */
static void check_move_unevictable_page(struct page *page, struct zone *zone)
{
        VM_BUG_ON(PageActive(page));

retry:
        ClearPageUnevictable(page);
        if (page_evictable(page, NULL)) {
                enum lru_list l = page_lru_base_type(page);

                __dec_zone_state(zone, NR_UNEVICTABLE);
                list_move(&page->lru, &zone->lru[l].list);
                mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
                __inc_zone_state(zone, NR_INACTIVE_ANON + l);
                __count_vm_event(UNEVICTABLE_PGRESCUED);
        } else {
                /*
                 * rotate unevictable list
                 */
                SetPageUnevictable(page);
                list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
                mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
                if (page_evictable(page, NULL))
                        goto retry;
        }
}

/**
 * scan_mapping_unevictable_pages - scan an address space for evictable pages
 * @mapping: struct address_space to scan for evictable pages
 *
 * Scan all pages in mapping.  Check unevictable pages for
 * evictability and move them to the appropriate zone lru list.
 */
void scan_mapping_unevictable_pages(struct address_space *mapping)
{
        pgoff_t next = 0;
        pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
                        PAGE_CACHE_SHIFT;
        struct zone *zone;
        struct pagevec pvec;

        if (mapping->nrpages == 0)
                return;

        pagevec_init(&pvec, 0);
        while (next < end &&
                pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                int i;
                int pg_scanned = 0;

                zone = NULL;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index = page->index;
                        struct zone *pagezone = page_zone(page);

                        pg_scanned++;
                        if (page_index > next)
                                next = page_index;
                        next++;

                        if (pagezone != zone) {
                                if (zone)
                                        spin_unlock_irq(&zone->lru_lock);
                                zone = pagezone;
                                spin_lock_irq(&zone->lru_lock);
                        }

                        if (PageLRU(page) && PageUnevictable(page))
                                check_move_unevictable_page(page, zone);
                }
                if (zone)
                        spin_unlock_irq(&zone->lru_lock);
                pagevec_release(&pvec);

                count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
        }
}

/**
 * scan_zone_unevictable_pages - check unevictable list for evictable pages
 * @zone: zone of which to scan the unevictable list
 *
 * Scan @zone's unevictable LRU lists to check for pages that have become
 * evictable.  Move those that have to @zone's inactive list where they
 * become candidates for reclaim, unless shrink_inactive_list() decides
 * to reactivate them.  Pages that are still unevictable are rotated
 * back onto @zone's unevictable list.
 */
#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
static void scan_zone_unevictable_pages(struct zone *zone)
{
        struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
        unsigned long scan;
        unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);

        while (nr_to_scan > 0) {
                unsigned long batch_size = min(nr_to_scan,
                                               SCAN_UNEVICTABLE_BATCH_SIZE);

                spin_lock_irq(&zone->lru_lock);
                for (scan = 0; scan < batch_size; scan++) {
                        struct page *page = lru_to_page(l_unevictable);

                        if (!trylock_page(page))
                                continue;

                        prefetchw_prev_lru_page(page, l_unevictable, flags);

                        if (likely(PageLRU(page) && PageUnevictable(page)))
                                check_move_unevictable_page(page, zone);

                        unlock_page(page);
                }
                spin_unlock_irq(&zone->lru_lock);

                nr_to_scan -= batch_size;
        }
}
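
/*
 * Illustrative note (editor's addition, not in the original source): with
 * the batch size of 16, a zone holding 100 unevictable pages is processed
 * as six batches of 16 and one of 4, dropping zone->lru_lock between
 * batches so that other lru_lock users are not starved.
 */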

/**
 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
 *
 * A really big hammer:  scan all zones' unevictable LRU lists to check for
 * pages that have become evictable.  Move those back to the zones'
 * inactive list where they become candidates for reclaim.
 * This occurs when, e.g., we have unswappable pages on the unevictable lists,
 * and we add swap to the system.  As such, it runs in the context of a task
 * that has possibly/probably made some previously unevictable pages
 * evictable.
 */
static void scan_all_zones_unevictable_pages(void)
{
        struct zone *zone;

        for_each_zone(zone) {
                scan_zone_unevictable_pages(zone);
        }
}

/*
 * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
 * all nodes' unevictable lists for evictable pages
 */
unsigned long scan_unevictable_pages;

int scan_unevictable_handler(struct ctl_table *table, int write,
                             void __user *buffer,
                             size_t *length, loff_t *ppos)
{
        proc_doulongvec_minmax(table, write, buffer, length, ppos);

        if (write && *(unsigned long *)table->data)
                scan_all_zones_unevictable_pages();

        scan_unevictable_pages = 0;
        return 0;
}

/*
 * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
 * a specified node's per zone unevictable lists for evictable pages.
 */
static ssize_t read_scan_unevictable_node(struct sys_device *dev,
                                          struct sysdev_attribute *attr,
                                          char *buf)
{
        return sprintf(buf, "0\n");     /* always zero; should fit... */
}

static ssize_t write_scan_unevictable_node(struct sys_device *dev,
                                           struct sysdev_attribute *attr,
                                           const char *buf, size_t count)
{
        struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
        struct zone *zone;
        unsigned long res;
        int err = strict_strtoul(buf, 10, &res);

        /*
         * strict_strtoul() returns 0 on success, so test the parsed
         * value in 'res', not the return code, for the zero no-op.
         */
        if (err || !res)
                return 1;       /* parse error, or zero is a no-op */

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;
                scan_zone_unevictable_pages(zone);
        }
        return 1;
}

static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
                   read_scan_unevictable_node,
                   write_scan_unevictable_node);

int scan_unevictable_register_node(struct node *node)
{
        return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
}

void scan_unevictable_unregister_node(struct node *node)
{
        sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
}