vmscan.c

/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
/*
 * reclaim_mode determines how the inactive list is shrunk
 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
 * RECLAIM_MODE_ASYNC:  Do not block
 * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
 *			page from the LRU and reclaim all pages within a
 *			naturally aligned range
 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
 *			order-0 pages and then compact the zone
 */
typedef unsigned __bitwise__ reclaim_mode_t;
#define RECLAIM_MODE_SINGLE		((__force reclaim_mode_t)0x01u)
#define RECLAIM_MODE_ASYNC		((__force reclaim_mode_t)0x02u)
#define RECLAIM_MODE_SYNC		((__force reclaim_mode_t)0x04u)
#define RECLAIM_MODE_LUMPYRECLAIM	((__force reclaim_mode_t)0x08u)
#define RECLAIM_MODE_COMPACTION		((__force reclaim_mode_t)0x10u)
struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	unsigned long hibernation_mode;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	int order;

	/*
	 * Intend to reclaim enough contiguous memory rather than just
	 * enough memory, i.e. the mode for high-order allocations.
	 */
	reclaim_mode_t reclaim_mode;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;
};

struct mem_cgroup_zone {
	struct mem_cgroup *mem_cgroup;
	struct zone *zone;
};
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}

static bool scanning_global_lru(struct mem_cgroup_zone *mz)
{
	return !mz->mem_cgroup;
}
#else
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}

static bool scanning_global_lru(struct mem_cgroup_zone *mz)
{
	return true;
}
#endif

static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
{
	if (!scanning_global_lru(mz))
		return mem_cgroup_get_reclaim_stat(mz->mem_cgroup, mz->zone);

	return &mz->zone->reclaim_stat;
}

static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
				       enum lru_list lru)
{
	if (!scanning_global_lru(mz))
		return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
						    zone_to_nid(mz->zone),
						    zone_idx(mz->zone),
						    BIT(lru));

	return zone_page_state(mz->zone, NR_LRU_BASE + lru);
}
/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	atomic_long_set(&shrinker->nr_in_batch, 0);
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);
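
/*
 * Illustrative sketch (not part of vmscan.c): a minimal client of the
 * shrinker API above.  The names demo_shrink, demo_cache_count,
 * demo_cache_free and demo_shrinker are hypothetical.  With this era's
 * API, a call with sc->nr_to_scan == 0 only reports how many objects
 * are freeable; otherwise the callback frees up to nr_to_scan objects
 * and returns the remaining count, or -1 if it cannot make progress
 * in the current allocation context:
 *
 *	static int demo_shrink(struct shrinker *s, struct shrink_control *sc)
 *	{
 *		if (!sc->nr_to_scan)
 *			return demo_cache_count();
 *		demo_cache_free(sc->nr_to_scan);
 *		return demo_cache_count();
 *	}
 *
 *	static struct shrinker demo_shrinker = {
 *		.shrink	= demo_shrink,
 *		.seeks	= DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&demo_shrinker);
 */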

static inline int do_shrinker_shrink(struct shrinker *shrinker,
				     struct shrink_control *sc,
				     unsigned long nr_to_scan)
{
	sc->nr_to_scan = nr_to_scan;
	return (*shrinker->shrink)(shrinker, sc);
}
  200. #define SHRINK_BATCH 128
  201. /*
  202. * Call the shrink functions to age shrinkable caches
  203. *
  204. * Here we assume it costs one seek to replace a lru page and that it also
  205. * takes a seek to recreate a cache object. With this in mind we age equal
  206. * percentages of the lru and ageable caches. This should balance the seeks
  207. * generated by these structures.
  208. *
  209. * If the vm encountered mapped pages on the LRU it increase the pressure on
  210. * slab to avoid swapping.
  211. *
  212. * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
  213. *
  214. * `lru_pages' represents the number of on-LRU pages in all the zones which
  215. * are eligible for the caller's allocation attempt. It is used for balancing
  216. * slab reclaim versus page reclaim.
  217. *
  218. * Returns the number of slab objects which we shrunk.
  219. */
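/*
 * Worked example with illustrative numbers: for nr_pages_scanned = 1024,
 * shrinker->seeks = DEFAULT_SEEKS (2), max_pass = 10000 freeable objects
 * and lru_pages = 100000, the basic scan target computed below is
 *
 *	delta = (4 * 1024 / 2) * 10000 / (100000 + 1) ~= 204 objects
 *
 * i.e. the slab cache is aged at roughly the same percentage rate as
 * the LRU was scanned.
 */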
unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (nr_pages_scanned == 0)
		nr_pages_scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem)) {
		/* Assume we'll be able to shrink next time */
		ret = 1;
		goto out;
	}

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		long total_scan;
		long max_pass;
		int shrink_ret = 0;
		long nr;
		long new_nr;
		long batch_size = shrinker->batch ? shrinker->batch
						  : SHRINK_BATCH;

		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
		if (max_pass <= 0)
			continue;

		/*
		 * copy the current shrinker scan count into a local variable
		 * and zero it so that other concurrent shrinker invocations
		 * don't also do this scanning work.
		 */
		nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);

		total_scan = nr;
		delta = (4 * nr_pages_scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		total_scan += delta;
		if (total_scan < 0) {
			printk(KERN_ERR "shrink_slab: %pF negative objects to "
			       "delete nr=%ld\n",
			       shrinker->shrink, total_scan);
			total_scan = max_pass;
		}

		/*
		 * We need to avoid excessive windup on filesystem shrinkers
		 * due to large numbers of GFP_NOFS allocations causing the
		 * shrinkers to return -1 all the time. This results in a large
		 * nr being built up so when a shrink that can do some work
		 * comes along it empties the entire cache due to nr >>>
		 * max_pass.  This is bad for sustaining a working set in
		 * memory.
		 *
		 * Hence only allow the shrinker to scan the entire cache when
		 * a large delta change is calculated directly.
		 */
		if (delta < max_pass / 4)
			total_scan = min(total_scan, max_pass / 2);

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (total_scan > max_pass * 2)
			total_scan = max_pass * 2;

		trace_mm_shrink_slab_start(shrinker, shrink, nr,
					nr_pages_scanned, lru_pages,
					max_pass, delta, total_scan);

		while (total_scan >= batch_size) {
			int nr_before;

			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
			shrink_ret = do_shrinker_shrink(shrinker, shrink,
							batch_size);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, batch_size);
			total_scan -= batch_size;

			cond_resched();
		}

		/*
		 * move the unused scan count back into the shrinker in a
		 * manner that handles concurrent updates. If we exhausted the
		 * scan, there is no need to do an update.
		 */
		if (total_scan > 0)
			new_nr = atomic_long_add_return(total_scan,
					&shrinker->nr_in_batch);
		else
			new_nr = atomic_long_read(&shrinker->nr_in_batch);

		trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
	}
	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return ret;
}

static void set_reclaim_mode(int priority, struct scan_control *sc,
			     bool sync)
{
	reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;

	/*
	 * Initially assume we are entering either lumpy reclaim or
	 * reclaim/compaction. Depending on the order, we will either set the
	 * sync mode or just reclaim order-0 pages later.
	 */
	if (COMPACTION_BUILD)
		sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
	else
		sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;

	/*
	 * Avoid using lumpy reclaim or reclaim/compaction if possible by
	 * restricting when it is set to either costly allocations or when
	 * under memory pressure.
	 */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		sc->reclaim_mode |= syncmode;
	else if (sc->order && priority < DEF_PRIORITY - 2)
		sc->reclaim_mode |= syncmode;
	else
		sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
}

static void reset_reclaim_mode(struct scan_control *sc)
{
	sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
}

static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	return page_count(page) - page_has_private(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi,
			      struct scan_control *sc)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;

	/* lumpy reclaim for hugepage often needs a lot of writes */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller
 * has __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking.  To prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_aio_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page,
			trace_reclaim_flags(page, sc->reclaim_mode));
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swapcache_free(swap, page);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;

		__delete_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_uncharge_cache_page(page);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}

/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	int lru;
	int active = !!TestClearPageActive(page);
	int was_unevictable = PageUnevictable(page);

	VM_BUG_ON(PageLRU(page));

redo:
	ClearPageUnevictable(page);

	if (page_evictable(page, NULL)) {
		/*
		 * For evictable pages, we can use the cache.
		 * In event of a race, worst case is we end up with an
		 * unevictable page on [in]active list.
		 * We know how to handle that.
		 */
		lru = active + page_lru_base_type(page);
		lru_cache_add_lru(page, lru);
	} else {
		/*
		 * Put unevictable pages directly on zone's unevictable
		 * list.
		 */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
		/*
		 * When racing with an mlock or AS_UNEVICTABLE clearing
		 * (page is unlocked) make sure that if the other thread
		 * does not observe our setting of PG_lru and fails
		 * isolation/check_move_unevictable_pages,
		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
		 * the page back to the evictable list.
		 *
		 * The other side is TestClearPageMlocked() or shmem_lock().
		 */
		smp_mb();
	}

	/*
	 * page's status can change while we move it among lru. If an evictable
	 * page is on the unevictable list, it will never be freed. To avoid
	 * that, check again after we have added it to the list.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
		if (!isolate_lru_page(page)) {
			put_page(page);
			goto redo;
		}
		/* This means someone else dropped this page from LRU
		 * So, it will be freed or putback to LRU again. There is
		 * nothing to do here.
		 */
	}

	if (was_unevictable && lru != LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGRESCUED);
	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGCULLED);

	put_page(page);		/* drop ref from isolate */
}

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct mem_cgroup_zone *mz,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, mz->mem_cgroup, &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/* Lumpy reclaim - ignore references */
	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
		return PAGEREF_RECLAIM;

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageAnon(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if (vm_flags & VM_EXEC)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}
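
/*
 * Illustrative summary of page_check_references(), derived from the code
 * above (ignoring the lumpy-reclaim early return):
 *
 *	ptes refs  page type / flags          result
 *	---------  -------------------------  ---------------------
 *	any        VM_LOCKED                  PAGEREF_RECLAIM
 *	> 0        anon                       PAGEREF_ACTIVATE
 *	> 1        mapped file                PAGEREF_ACTIVATE
 *	1          file, PG_referenced set    PAGEREF_ACTIVATE
 *	1          file, VM_EXEC              PAGEREF_ACTIVATE
 *	1          file, first use            PAGEREF_KEEP
 *	0          PG_referenced, !swapbacked PAGEREF_RECLAIM_CLEAN
 *	0          otherwise                  PAGEREF_RECLAIM
 */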

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
				      struct mem_cgroup_zone *mz,
				      struct scan_control *sc,
				      int priority,
				      unsigned long *ret_nr_dirty,
				      unsigned long *ret_nr_writeback)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned long nr_dirty = 0;
	unsigned long nr_congested = 0;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_writeback = 0;

	cond_resched();

	while (!list_empty(page_list)) {
		enum page_references references;
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(page_zone(page) != mz->zone);

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page, NULL)))
			goto cull_mlocked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			nr_writeback++;
			/*
			 * Synchronous reclaim cannot queue pages for
			 * writeback due to the possibility of stack overflow
			 * but if it encounters a page under writeback, wait
			 * for the IO to complete.
			 */
			if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
			    may_enter_fs)
				wait_on_page_writeback(page);
			else {
				unlock_page(page);
				goto keep_lumpy;
			}
		}

		references = page_check_references(page, mz, sc);
		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page))
				goto activate_locked;
			may_enter_fs = 1;
		}

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, TTU_UNMAP)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_MLOCK:
				goto cull_mlocked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			nr_dirty++;

			/*
			 * Only kswapd can writeback filesystem pages to
			 * avoid risk of stack overflow but do not writeback
			 * unless under significant pressure.
			 */
			if (page_is_file_cache(page) &&
			    (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty.
				 */
				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto keep_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				nr_congested++;
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page))
					goto keep_lumpy;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
		__clear_page_locked(page);
free_it:
		nr_reclaimed++;

		/*
		 * Is there need to periodically free_page_list? It would
		 * appear not as the counts should be low
		 */
		list_add(&page->lru, &free_pages);
		continue;

cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		putback_lru_page(page);
		reset_reclaim_mode(sc);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		reset_reclaim_mode(sc);
keep_lumpy:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}

	/*
	 * Tag a zone as congested if all the dirty pages encountered were
	 * backed by a congested BDI. In this case, reclaimers should just
	 * back off and wait for congestion to clear because further reclaim
	 * will encounter the same problem
	 */
	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
		zone_set_flag(mz->zone, ZONE_CONGESTED);

	free_hot_cold_page_list(&free_pages, 1);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);
	*ret_nr_dirty += nr_dirty;
	*ret_nr_writeback += nr_writeback;
	return nr_reclaimed;
}

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
{
	bool all_lru_mode;
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
		(ISOLATE_ACTIVE|ISOLATE_INACTIVE);

	/*
	 * When checking the active state, we need to be sure we are
	 * dealing with comparable boolean values.  Take the logical not
	 * of each.
	 */
	if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
		return ret;

	if (!all_lru_mode && !!page_is_file_cache(page) != file)
		return ret;

	/*
	 * When this function is being called for lumpy reclaim, we
	 * initially look into all LRU pages, active, inactive and
	 * unevictable; only give shrink_page_list evictable pages.
	 */
	if (PageUnevictable(page))
		return ret;

	ret = -EBUSY;

	/*
	 * To minimise LRU disruption, the caller can indicate that it only
	 * wants to isolate pages it will be able to operate on without
	 * blocking - clean pages for the most part.
	 *
	 * ISOLATE_CLEAN means that only clean pages should be isolated. This
	 * is used by reclaim when it cannot write to backing storage.
	 *
	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants
	 * pages that it is possible to migrate without blocking.
	 */
	if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
		/* All the caller can do on PageWriteback is block */
		if (PageWriteback(page))
			return ret;

		if (PageDirty(page)) {
			struct address_space *mapping;

			/* ISOLATE_CLEAN means only clean pages */
			if (mode & ISOLATE_CLEAN)
				return ret;

			/*
			 * Only pages without mappings or that have a
			 * ->migratepage callback are possible to migrate
			 * without blocking.
			 */
			mapping = page_mapping(page);
			if (mapping && !mapping->a_ops->migratepage)
				return ret;
		}
	}

	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
		return ret;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @mz:		The mem_cgroup_zone to pull pages from.
 * @dst:	The temp list to put pages on to.
 * @nr_scanned:	The number of pages that were scanned.
 * @sc:		The scan_control struct for this reclaim session
 * @mode:	One of the LRU isolation modes
 * @active:	True [1] if isolating active pages
 * @file:	True [1] if isolating file [!anon] pages
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct mem_cgroup_zone *mz, struct list_head *dst,
		unsigned long *nr_scanned, struct scan_control *sc,
		isolate_mode_t mode, int active, int file)
{
	struct lruvec *lruvec;
	struct list_head *src;
	unsigned long nr_taken = 0;
	unsigned long nr_lumpy_taken = 0;
	unsigned long nr_lumpy_dirty = 0;
	unsigned long nr_lumpy_failed = 0;
	unsigned long scan;
	int lru = LRU_BASE;

	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
	if (active)
		lru += LRU_ACTIVE;
	if (file)
		lru += LRU_FILE;
	src = &lruvec->lists[lru];

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		unsigned long pfn;
		unsigned long end_pfn;
		unsigned long page_pfn;
		int zone_id;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode, file)) {
		case 0:
			mem_cgroup_lru_del(page);
			list_move(&page->lru, dst);
			nr_taken += hpage_nr_pages(page);
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}

		if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM))
			continue;

		/*
		 * Attempt to take all pages in the order aligned region
		 * surrounding the tag page.  Only take those pages of
		 * the same active state as that tag page.  We may safely
		 * round the target page pfn down to the requested order
		 * as the mem_map is guaranteed valid out to MAX_ORDER;
		 * if that page is in a different zone we will detect it
		 * from its zone id and abort this block scan.
		 */
		zone_id = page_zone_id(page);
		page_pfn = page_to_pfn(page);
		pfn = page_pfn & ~((1 << sc->order) - 1);
		end_pfn = pfn + (1 << sc->order);
		for (; pfn < end_pfn; pfn++) {
			struct page *cursor_page;

			/* The target page is in the block, ignore it. */
			if (unlikely(pfn == page_pfn))
				continue;

			/* Avoid holes within the zone. */
			if (unlikely(!pfn_valid_within(pfn)))
				break;

			cursor_page = pfn_to_page(pfn);

			/* Check that we have not crossed a zone boundary. */
			if (unlikely(page_zone_id(cursor_page) != zone_id))
				break;

			/*
			 * If we don't have enough swap space, reclaiming of
			 * anon pages which don't already have a swap slot is
			 * pointless.
			 */
			if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
			    !PageSwapCache(cursor_page))
				break;

			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
				unsigned int isolated_pages;

				mem_cgroup_lru_del(cursor_page);
				list_move(&cursor_page->lru, dst);
				isolated_pages = hpage_nr_pages(cursor_page);
				nr_taken += isolated_pages;
				nr_lumpy_taken += isolated_pages;
				if (PageDirty(cursor_page))
					nr_lumpy_dirty += isolated_pages;
				scan++;
				pfn += isolated_pages - 1;
			} else {
				/*
				 * Check if the page is freed already.
				 *
				 * We can't use page_count() as that
				 * requires compound_head and we don't
				 * have a pin on the page here. If a
				 * page is tail, we may or may not
				 * have isolated the head, so assume
				 * it's not free, it'd be tricky to
				 * track the head status without a
				 * page pin.
				 */
				if (!PageTail(cursor_page) &&
				    !atomic_read(&cursor_page->_count))
					continue;
				break;
			}
		}

		/* If we break out of the loop above, lumpy reclaim failed */
		if (pfn < end_pfn)
			nr_lumpy_failed++;
	}

	*nr_scanned = scan;

	trace_mm_vmscan_lru_isolate(sc->order,
			nr_to_scan, scan,
			nr_taken,
			nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
			mode, file);
	return nr_taken;
}

/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set. That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	VM_BUG_ON(!page_count(page));

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			int lru = page_lru(page);
			ret = 0;
			get_page(page);
			ClearPageLRU(page);

			del_page_from_lru_list(zone, page, lru);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * Are there way too many processes in the direct reclaim path already?
 */
static int too_many_isolated(struct zone *zone, int file,
			     struct scan_control *sc)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!global_reclaim(sc))
		return 0;

	if (file) {
		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
	} else {
		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
	}

	return isolated > inactive;
}

static noinline_for_stack void
putback_inactive_pages(struct mem_cgroup_zone *mz,
		       struct list_head *page_list)
{
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
	struct zone *zone = mz->zone;
	LIST_HEAD(pages_to_free);

	/*
	 * Put back any unfreeable pages.
	 */
	while (!list_empty(page_list)) {
		struct page *page = lru_to_page(page_list);
		int lru;

		VM_BUG_ON(PageLRU(page));
		list_del(&page->lru);
		if (unlikely(!page_evictable(page, NULL))) {
			spin_unlock_irq(&zone->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&zone->lru_lock);
			continue;
		}
		SetPageLRU(page);
		lru = page_lru(page);
		add_page_to_lru_list(zone, page, lru);
		if (is_active_lru(lru)) {
			int file = is_file_lru(lru);
			int numpages = hpage_nr_pages(page);
			reclaim_stat->recent_rotated[file] += numpages;
		}
		if (put_page_testzero(page)) {
			__ClearPageLRU(page);
			__ClearPageActive(page);
			del_page_from_lru_list(zone, page, lru);

			if (unlikely(PageCompound(page))) {
				spin_unlock_irq(&zone->lru_lock);
				(*get_compound_page_dtor(page))(page);
				spin_lock_irq(&zone->lru_lock);
			} else
				list_add(&page->lru, &pages_to_free);
		}
	}

	/*
	 * To save our caller's stack, now use input list for pages to free.
	 */
	list_splice(&pages_to_free, page_list);
}

static noinline_for_stack void
update_isolated_counts(struct mem_cgroup_zone *mz,
                       struct list_head *page_list,
                       unsigned long *nr_anon,
                       unsigned long *nr_file)
{
        struct zone *zone = mz->zone;
        unsigned int count[NR_LRU_LISTS] = { 0, };
        unsigned long nr_active = 0;
        struct page *page;
        int lru;

        /*
         * Count pages and clear active flags
         */
        list_for_each_entry(page, page_list, lru) {
                int numpages = hpage_nr_pages(page);
                lru = page_lru_base_type(page);
                if (PageActive(page)) {
                        lru += LRU_ACTIVE;
                        ClearPageActive(page);
                        nr_active += numpages;
                }
                count[lru] += numpages;
        }

        preempt_disable();
        __count_vm_events(PGDEACTIVATE, nr_active);

        __mod_zone_page_state(zone, NR_ACTIVE_FILE,
                              -count[LRU_ACTIVE_FILE]);
        __mod_zone_page_state(zone, NR_INACTIVE_FILE,
                              -count[LRU_INACTIVE_FILE]);
        __mod_zone_page_state(zone, NR_ACTIVE_ANON,
                              -count[LRU_ACTIVE_ANON]);
        __mod_zone_page_state(zone, NR_INACTIVE_ANON,
                              -count[LRU_INACTIVE_ANON]);

        *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
        *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
        __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
        __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
        preempt_enable();
}

/*
 * Returns true if a direct reclaim should wait on pages under writeback.
 *
 * If we are direct reclaiming for contiguous pages and we do not reclaim
 * everything in the list, try again and wait for writeback IO to complete.
 * This will stall high-order allocations noticeably.  Only do that when we
 * really need to free the pages under high memory pressure.
 */
static inline bool should_reclaim_stall(unsigned long nr_taken,
                                        unsigned long nr_freed,
                                        int priority,
                                        struct scan_control *sc)
{
        int lumpy_stall_priority;

        /* kswapd should not stall on sync IO */
        if (current_is_kswapd())
                return false;

        /* Only stall on lumpy reclaim */
        if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
                return false;

        /* If we have reclaimed everything on the isolated list, no stall */
        if (nr_freed == nr_taken)
                return false;

        /*
         * For high-order allocations, there are two stall thresholds.
         * High-cost allocations stall immediately, whereas lower-order
         * allocations such as stacks require the scanning
         * priority to be much higher before stalling.
         */
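        /*
         * Worked example, assuming the usual values DEF_PRIORITY == 12 and
         * PAGE_ALLOC_COSTLY_ORDER == 3: an order-9 request stalls at any
         * priority <= 12, i.e. on the very first pass, while an order-2
         * (stack-sized) request only stalls once the scanning priority has
         * dropped to 12 / 3 == 4.
         */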
        if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
                lumpy_stall_priority = DEF_PRIORITY;
        else
                lumpy_stall_priority = DEF_PRIORITY / 3;

        return priority <= lumpy_stall_priority;
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
                     struct scan_control *sc, int priority, int file)
{
        LIST_HEAD(page_list);
        unsigned long nr_scanned;
        unsigned long nr_reclaimed = 0;
        unsigned long nr_taken;
        unsigned long nr_anon;
        unsigned long nr_file;
        unsigned long nr_dirty = 0;
        unsigned long nr_writeback = 0;
        isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
        struct zone *zone = mz->zone;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);

        while (unlikely(too_many_isolated(zone, file, sc))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);

                /* We are about to die and free our memory. Return now. */
                if (fatal_signal_pending(current))
                        return SWAP_CLUSTER_MAX;
        }

        set_reclaim_mode(priority, sc, false);
        if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
                isolate_mode |= ISOLATE_ACTIVE;

        lru_add_drain();

        if (!sc->may_unmap)
                isolate_mode |= ISOLATE_UNMAPPED;
        if (!sc->may_writepage)
                isolate_mode |= ISOLATE_CLEAN;

        spin_lock_irq(&zone->lru_lock);

        nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
                                     sc, isolate_mode, 0, file);
        if (global_reclaim(sc)) {
                zone->pages_scanned += nr_scanned;
                if (current_is_kswapd())
                        __count_zone_vm_events(PGSCAN_KSWAPD, zone,
                                               nr_scanned);
                else
                        __count_zone_vm_events(PGSCAN_DIRECT, zone,
                                               nr_scanned);
        }
        spin_unlock_irq(&zone->lru_lock);

        if (nr_taken == 0)
                return 0;

        update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);

        nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
                                        &nr_dirty, &nr_writeback);

        /* Check if we should synchronously wait for writeback */
        if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
                set_reclaim_mode(priority, sc, true);
                nr_reclaimed += shrink_page_list(&page_list, mz, sc,
                                        priority, &nr_dirty, &nr_writeback);
        }

        spin_lock_irq(&zone->lru_lock);

        reclaim_stat->recent_scanned[0] += nr_anon;
        reclaim_stat->recent_scanned[1] += nr_file;

        if (current_is_kswapd())
                __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
        __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);

        putback_inactive_pages(mz, &page_list);

        __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
        __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);

        spin_unlock_irq(&zone->lru_lock);

        free_hot_cold_page_list(&page_list, 1);

        /*
         * If reclaim is isolating dirty pages under writeback, it implies
         * that the long-lived page allocation rate is exceeding the page
         * laundering rate.  Either the global limits are not being effective
         * at throttling processes due to the page distribution throughout
         * zones or there is heavy usage of a slow backing device.  The
         * only option is to throttle from reclaim context, which is not ideal
         * as there is no guarantee the dirtying process is throttled in the
         * same way balance_dirty_pages() manages.
         *
         * This scales the number of dirty pages that must be under writeback
         * before throttling depending on priority.  It is a simple backoff
         * function that has the most effect in the range DEF_PRIORITY to
         * DEF_PRIORITY-2, which is the range in which reclaim is considered
         * to be in trouble.
         *
         * DEF_PRIORITY   100% isolated pages must be PageWriteback to throttle
         * DEF_PRIORITY-1  50% must be PageWriteback
         * DEF_PRIORITY-2  25% must be PageWriteback, kswapd in trouble
         * ...
         * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
         *                isolated page is PageWriteback
         */
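        /*
         * For instance, with nr_taken == SWAP_CLUSTER_MAX (32 pages,
         * assuming the usual value) at priority DEF_PRIORITY-2, the
         * threshold below works out to 32 >> 2 == 8 isolated pages under
         * writeback before we throttle.
         */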
        if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
                wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);

        trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
                zone_idx(zone),
                nr_scanned, nr_reclaimed,
                priority,
                trace_shrink_flags(file, sc->reclaim_mode));
        return nr_reclaimed;
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void move_active_pages_to_lru(struct zone *zone,
                                     struct list_head *list,
                                     struct list_head *pages_to_free,
                                     enum lru_list lru)
{
        unsigned long pgmoved = 0;
        struct page *page;

        while (!list_empty(list)) {
                struct lruvec *lruvec;

                page = lru_to_page(list);

                VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);

                lruvec = mem_cgroup_lru_add_list(zone, page, lru);
                list_move(&page->lru, &lruvec->lists[lru]);
                pgmoved += hpage_nr_pages(page);

                if (put_page_testzero(page)) {
                        __ClearPageLRU(page);
                        __ClearPageActive(page);
                        del_page_from_lru_list(zone, page, lru);

                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&zone->lru_lock);
                                (*get_compound_page_dtor(page))(page);
                                spin_lock_irq(&zone->lru_lock);
                        } else
                                list_add(&page->lru, pages_to_free);
                }
        }
        __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
        if (!is_active_lru(lru))
                __count_vm_events(PGDEACTIVATE, pgmoved);
}

static void shrink_active_list(unsigned long nr_to_scan,
                               struct mem_cgroup_zone *mz,
                               struct scan_control *sc,
                               int priority, int file)
{
        unsigned long nr_taken;
        unsigned long nr_scanned;
        unsigned long vm_flags;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
        LIST_HEAD(l_active);
        LIST_HEAD(l_inactive);
        struct page *page;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
        unsigned long nr_rotated = 0;
        isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
        struct zone *zone = mz->zone;

        lru_add_drain();

        reset_reclaim_mode(sc);

        if (!sc->may_unmap)
                isolate_mode |= ISOLATE_UNMAPPED;
        if (!sc->may_writepage)
                isolate_mode |= ISOLATE_CLEAN;

        spin_lock_irq(&zone->lru_lock);

        nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
                                     isolate_mode, 1, file);
        if (global_reclaim(sc))
                zone->pages_scanned += nr_scanned;

        reclaim_stat->recent_scanned[file] += nr_taken;

        __count_zone_vm_events(PGREFILL, zone, nr_scanned);
        if (file)
                __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
        else
                __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
        spin_unlock_irq(&zone->lru_lock);

        while (!list_empty(&l_hold)) {
                cond_resched();
                page = lru_to_page(&l_hold);
                list_del(&page->lru);

                if (unlikely(!page_evictable(page, NULL))) {
                        putback_lru_page(page);
                        continue;
                }

                if (unlikely(buffer_heads_over_limit)) {
                        if (page_has_private(page) && trylock_page(page)) {
                                if (page_has_private(page))
                                        try_to_release_page(page, 0);
                                unlock_page(page);
                        }
                }

                if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
                        nr_rotated += hpage_nr_pages(page);
                        /*
                         * Identify referenced, file-backed active pages and
                         * give them one more trip around the active list, so
                         * that executable code gets a better chance to stay
                         * in memory under moderate memory pressure.  Anon
                         * pages are not likely to be evicted by use-once
                         * streaming IO, plus JVMs can create lots of anon
                         * VM_EXEC pages, so we ignore them here.
                         */
                        if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
                }

                ClearPageActive(page);  /* we are de-activating */
                list_add(&page->lru, &l_inactive);
        }

        /*
         * Move pages back to the lru list.
         */
        spin_lock_irq(&zone->lru_lock);
        /*
         * Count referenced pages from currently used mappings as rotated,
         * even though only some of them are actually re-activated.  This
         * helps balance scan pressure between file and anonymous pages in
         * get_scan_count().
         */
        reclaim_stat->recent_rotated[file] += nr_rotated;

        move_active_pages_to_lru(zone, &l_active, &l_hold,
                                                LRU_ACTIVE + file * LRU_FILE);
        move_active_pages_to_lru(zone, &l_inactive, &l_hold,
                                                LRU_BASE   + file * LRU_FILE);
        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
        spin_unlock_irq(&zone->lru_lock);

        free_hot_cold_page_list(&l_hold, 1);
}

#ifdef CONFIG_SWAP
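/*
 * Note on zone->inactive_ratio used below: it is sized from the zone's
 * size elsewhere (in this era roughly int_sqrt(10 * zone_size_in_GB) in
 * mm/page_alloc.c -- treat the exact formula as an assumption).  A ratio
 * of 3, for example, means the check fires until at least a quarter of
 * the zone's anon pages (1 inactive for every 3 active) are inactive.
 */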
static int inactive_anon_is_low_global(struct zone *zone)
{
        unsigned long active, inactive;

        active = zone_page_state(zone, NR_ACTIVE_ANON);
        inactive = zone_page_state(zone, NR_INACTIVE_ANON);

        if (inactive * zone->inactive_ratio < active)
                return 1;

        return 0;
}

/**
 * inactive_anon_is_low - check if anonymous pages need to be deactivated
 * @mz: memory cgroup and zone to check
 *
 * Returns true if the zone does not have enough inactive anon pages,
 * meaning some active anon pages need to be deactivated.
 */
static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
{
        /*
         * If we don't have swap space, anonymous page deactivation
         * is pointless.
         */
        if (!total_swap_pages)
                return 0;

        if (!scanning_global_lru(mz))
                return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup,
                                                       mz->zone);

        return inactive_anon_is_low_global(mz->zone);
}
#else
static inline int inactive_anon_is_low(struct mem_cgroup_zone *mz)
{
        return 0;
}
#endif

static int inactive_file_is_low_global(struct zone *zone)
{
        unsigned long active, inactive;

        active = zone_page_state(zone, NR_ACTIVE_FILE);
        inactive = zone_page_state(zone, NR_INACTIVE_FILE);

        return (active > inactive);
}

/**
 * inactive_file_is_low - check if file pages need to be deactivated
 * @mz: memory cgroup and zone to check
 *
 * When the system is doing streaming IO, memory pressure here
 * ensures that active file pages get deactivated, until more
 * than half of the file pages are on the inactive list.
 *
 * Once we get to that situation, protect the system's working
 * set from being evicted by disabling active file page aging.
 *
 * This uses a different ratio than the anonymous pages, because
 * the page cache uses a use-once replacement algorithm.
 */
static int inactive_file_is_low(struct mem_cgroup_zone *mz)
{
        if (!scanning_global_lru(mz))
                return mem_cgroup_inactive_file_is_low(mz->mem_cgroup,
                                                       mz->zone);

        return inactive_file_is_low_global(mz->zone);
}

static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file)
{
        if (file)
                return inactive_file_is_low(mz);
        else
                return inactive_anon_is_low(mz);
}

static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
                                 struct mem_cgroup_zone *mz,
                                 struct scan_control *sc, int priority)
{
        int file = is_file_lru(lru);

        if (is_active_lru(lru)) {
                if (inactive_list_is_low(mz, file))
                        shrink_active_list(nr_to_scan, mz, sc, priority, file);
                return 0;
        }

        return shrink_inactive_list(nr_to_scan, mz, sc, priority, file);
}

static int vmscan_swappiness(struct mem_cgroup_zone *mz,
                             struct scan_control *sc)
{
        if (global_reclaim(sc))
                return vm_swappiness;
        return mem_cgroup_swappiness(mz->mem_cgroup);
}

/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative value of each set of LRU lists is determined
 * by looking at the fraction of the pages scanned that we rotated back
 * onto the active list instead of evicting.
 *
 * nr[0] = anon pages to scan; nr[1] = file pages to scan
 */
static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
                           unsigned long *nr, int priority)
{
        unsigned long anon, file, free;
        unsigned long anon_prio, file_prio;
        unsigned long ap, fp;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
        u64 fraction[2], denominator;
        enum lru_list lru;
        int noswap = 0;
        bool force_scan = false;

        /*
         * If the zone or memcg is small, nr[l] can be 0.  This
         * results in no scanning on this priority and a potential
         * priority drop.  Global direct reclaim can go to the next
         * zone and tends to have no problems.  Global kswapd is for
         * zone balancing and it needs to scan a minimum amount.  When
         * reclaiming for a memcg, a priority drop can cause high
         * latencies, so it's better to scan a minimum amount there as
         * well.
         */
        if (current_is_kswapd() && mz->zone->all_unreclaimable)
                force_scan = true;
        if (!global_reclaim(sc))
                force_scan = true;

        /* If we have no swap space, do not bother scanning anon pages. */
        if (!sc->may_swap || (nr_swap_pages <= 0)) {
                noswap = 1;
                fraction[0] = 0;
                fraction[1] = 1;
                denominator = 1;
                goto out;
        }

        anon  = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
                zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
        file  = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
                zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);

        if (global_reclaim(sc)) {
                free  = zone_page_state(mz->zone, NR_FREE_PAGES);
                /* If we have very few page cache pages,
                   force-scan anon pages. */
                if (unlikely(file + free <= high_wmark_pages(mz->zone))) {
                        fraction[0] = 1;
                        fraction[1] = 0;
                        denominator = 1;
                        goto out;
                }
        }

        /*
         * With swappiness at 100, anonymous and file have the same priority.
         * This scanning priority is essentially the inverse of IO cost.
         */
        anon_prio = vmscan_swappiness(mz, sc);
        file_prio = 200 - vmscan_swappiness(mz, sc);

        /*
         * OK, so we have swap space and a fair amount of page cache
         * pages.  We use the recently rotated / recently scanned
         * ratios to determine how valuable each cache is.
         *
         * Because workloads change over time (and to avoid overflow)
         * we keep these statistics as a floating average, which ends
         * up weighing recent references more than old ones.
         *
         * anon in [0], file in [1]
         */
        spin_lock_irq(&mz->zone->lru_lock);
        if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
                reclaim_stat->recent_scanned[0] /= 2;
                reclaim_stat->recent_rotated[0] /= 2;
        }

        if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
                reclaim_stat->recent_scanned[1] /= 2;
                reclaim_stat->recent_rotated[1] /= 2;
        }

        /*
         * The amount of pressure on anon vs file pages is inversely
         * proportional to the fraction of recently scanned pages on
         * each list that were recently referenced and in active use.
         */
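        /*
         * Worked example (numbers purely illustrative): with the default
         * swappiness of 60, anon_prio == 60 and file_prio == 140.  Given
         * recent anon stats of 1000 scanned / 500 rotated and recent file
         * stats of 1000 scanned / 50 rotated:
         *   ap = 61 * 1001 / 501 ~= 121
         *   fp = 141 * 1001 / 51 ~= 2767
         * so file pages receive roughly 2767 / (121 + 2767 + 1) ~= 96% of
         * the scan pressure.
         */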
        ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
        ap /= reclaim_stat->recent_rotated[0] + 1;

        fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
        fp /= reclaim_stat->recent_rotated[1] + 1;
        spin_unlock_irq(&mz->zone->lru_lock);

        fraction[0] = ap;
        fraction[1] = fp;
        denominator = ap + fp + 1;
out:
        for_each_evictable_lru(lru) {
                int file = is_file_lru(lru);
                unsigned long scan;

                scan = zone_nr_lru_pages(mz, lru);
                if (priority || noswap) {
                        scan >>= priority;
                        if (!scan && force_scan)
                                scan = SWAP_CLUSTER_MAX;
                        scan = div64_u64(scan * fraction[file], denominator);
                }
                nr[lru] = scan;
        }
}

/*
 * Reclaim/compaction depends on a number of pages being freed.  To avoid
 * disruption to the system, a small number of order-0 pages continue to be
 * rotated and reclaimed in the normal fashion.  However, by the time we get
 * back to the allocator and call try_to_compact_zone(), we ensure that
 * there are enough free pages for it to be likely successful.
 */
static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
                                           unsigned long nr_reclaimed,
                                           unsigned long nr_scanned,
                                           struct scan_control *sc)
{
        unsigned long pages_for_compaction;
        unsigned long inactive_lru_pages;

        /* If not in reclaim/compaction mode, stop */
        if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
                return false;

        /* Consider stopping depending on scan and reclaim activity */
        if (sc->gfp_mask & __GFP_REPEAT) {
                /*
                 * For __GFP_REPEAT allocations, stop reclaiming if the
                 * full LRU list has been scanned and we are still failing
                 * to reclaim pages.  This full LRU scan is potentially
                 * expensive, but a __GFP_REPEAT caller really wants to
                 * succeed.
                 */
                if (!nr_reclaimed && !nr_scanned)
                        return false;
        } else {
                /*
                 * For non-__GFP_REPEAT allocations which can presumably
                 * fail without consequence, stop if we failed to reclaim
                 * any pages from the last SWAP_CLUSTER_MAX number of
                 * pages that were scanned.  This will return to the
                 * caller faster, at the risk that reclaim/compaction and
                 * the resulting allocation attempt fail.
                 */
                if (!nr_reclaimed)
                        return false;
        }

        /*
         * If we have not reclaimed enough pages for compaction and the
         * inactive lists are large enough, continue reclaiming
         */
        pages_for_compaction = (2UL << sc->order);
        inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
        if (nr_swap_pages > 0)
                inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
        if (sc->nr_reclaimed < pages_for_compaction &&
                        inactive_lru_pages > pages_for_compaction)
                return true;

        /* If compaction would go ahead or the allocation would succeed, stop */
        switch (compaction_suitable(mz->zone, sc->order)) {
        case COMPACT_PARTIAL:
        case COMPACT_CONTINUE:
                return false;
        default:
                return true;
        }
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
                                   struct scan_control *sc)
{
        unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
        enum lru_list lru;
        unsigned long nr_reclaimed, nr_scanned;
        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
        struct blk_plug plug;

restart:
        nr_reclaimed = 0;
        nr_scanned = sc->nr_scanned;
        get_scan_count(mz, sc, nr, priority);

        blk_start_plug(&plug);
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
                for_each_evictable_lru(lru) {
                        if (nr[lru]) {
                                nr_to_scan = min_t(unsigned long,
                                                   nr[lru], SWAP_CLUSTER_MAX);
                                nr[lru] -= nr_to_scan;

                                nr_reclaimed += shrink_list(lru, nr_to_scan,
                                                            mz, sc, priority);
                        }
                }
                /*
                 * On large memory systems, scan >> priority can become
                 * really large.  This is fine for the starting priority;
                 * we want to put equal scanning pressure on each zone.
                 * However, if the VM has a harder time of freeing pages,
                 * with multiple processes reclaiming pages, the total
                 * freeing target can get unreasonably large.
                 */
                if (nr_reclaimed >= nr_to_reclaim)
                        nr_to_reclaim = 0;
                else
                        nr_to_reclaim -= nr_reclaimed;
                if (!nr_to_reclaim && priority < DEF_PRIORITY)
                        break;
        }
        blk_finish_plug(&plug);
        sc->nr_reclaimed += nr_reclaimed;

        /*
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
        if (inactive_anon_is_low(mz))
                shrink_active_list(SWAP_CLUSTER_MAX, mz, sc, priority, 0);

        /* reclaim/compaction might need reclaim to continue */
        if (should_continue_reclaim(mz, nr_reclaimed,
                                    sc->nr_scanned - nr_scanned, sc))
                goto restart;

        throttle_vm_writeout(sc->gfp_mask);
}

static void shrink_zone(int priority, struct zone *zone,
                        struct scan_control *sc)
{
        struct mem_cgroup *root = sc->target_mem_cgroup;
        struct mem_cgroup_reclaim_cookie reclaim = {
                .zone = zone,
                .priority = priority,
        };
        struct mem_cgroup *memcg;

        memcg = mem_cgroup_iter(root, NULL, &reclaim);
        do {
                struct mem_cgroup_zone mz = {
                        .mem_cgroup = memcg,
                        .zone = zone,
                };

                shrink_mem_cgroup_zone(priority, &mz, sc);
                /*
                 * Limit reclaim has historically picked one memcg and
                 * scanned it with decreasing priority levels until
                 * nr_to_reclaim had been reclaimed.  This priority
                 * cycle is thus over after a single memcg.
                 *
                 * Direct reclaim and kswapd, on the other hand, have
                 * to scan all memory cgroups to fulfill the overall
                 * scan target for the zone.
                 */
                if (!global_reclaim(sc)) {
                        mem_cgroup_iter_break(root, memcg);
                        break;
                }
                memcg = mem_cgroup_iter(root, memcg, &reclaim);
        } while (memcg);
}

/* Returns true if compaction should go ahead for a high-order request */
static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
{
        unsigned long balance_gap, watermark;
        bool watermark_ok;

        /* Do not consider compaction for orders reclaim is meant to satisfy */
        if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
                return false;

        /*
         * Compaction takes time to run and there are potentially other
         * callers using the pages just freed.  Continue reclaiming until
         * there is a buffer of free pages available to give compaction
         * a reasonable chance of completing and allocating the page.
         */
        balance_gap = min(low_wmark_pages(zone),
                (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
                        KSWAPD_ZONE_BALANCE_GAP_RATIO);
        watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
        watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);

        /*
         * If compaction is deferred, reclaim up to a point where
         * compaction will have a chance of success when re-enabled
         */
        if (compaction_deferred(zone, sc->order))
                return watermark_ok;

        /* If compaction is not ready to start, keep reclaiming */
        if (!compaction_suitable(zone, sc->order))
                return false;

        return watermark_ok;
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
 * Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
 *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
 *    zone defense algorithm.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan, then give up on it.
 *
 * This function returns true if a zone is being reclaimed for a costly
 * high-order allocation and compaction is ready to begin.  This indicates to
 * the caller that it should consider retrying the allocation instead of
 * further reclaim.
 */
static bool shrink_zones(int priority, struct zonelist *zonelist,
                         struct scan_control *sc)
{
        struct zoneref *z;
        struct zone *zone;
        unsigned long nr_soft_reclaimed;
        unsigned long nr_soft_scanned;
        bool aborted_reclaim = false;

        /*
         * If the number of buffer_heads in the machine exceeds the maximum
         * allowed level, force direct reclaim to scan the highmem zone as
         * highmem pages could be pinning lowmem pages storing buffer_heads
         */
        if (buffer_heads_over_limit)
                sc->gfp_mask |= __GFP_HIGHMEM;

        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
                if (!populated_zone(zone))
                        continue;
                /*
                 * Take care that memory controller reclaiming has only a
                 * small influence on the global LRU.
                 */
                if (global_reclaim(sc)) {
                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                continue;
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
                        if (COMPACTION_BUILD) {
                                /*
                                 * If we already have plenty of memory free for
                                 * compaction in this zone, don't free any more.
                                 * Even though compaction is invoked for any
                                 * non-zero order, only frequent costly order
                                 * reclamation is disruptive enough to become a
                                 * noticeable problem, like transparent huge
                                 * page allocations.
                                 */
                                if (compaction_ready(zone, sc)) {
                                        aborted_reclaim = true;
                                        continue;
                                }
                        }
                        /*
                         * This steals pages from memory cgroups over softlimit
                         * and returns the number of reclaimed pages and
                         * scanned pages.  This works for global memory pressure
                         * and balancing, not for a memcg's limit.
                         */
                        nr_soft_scanned = 0;
                        nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
                                                sc->order, sc->gfp_mask,
                                                &nr_soft_scanned);
                        sc->nr_reclaimed += nr_soft_reclaimed;
                        sc->nr_scanned += nr_soft_scanned;
                        /* need some check to avoid more shrink_zone() calls */
                }

                shrink_zone(priority, zone, sc);
        }

        return aborted_reclaim;
}
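
/*
 * A zone still counts as reclaimable until it has been scanned six times
 * over without progress.  zone->pages_scanned is reset by the page
 * allocator's free path (an assumption about code outside this file), so
 * any sustained freeing keeps the zone "alive" for further scanning.
 */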
static bool zone_reclaimable(struct zone *zone)
{
        return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
}

/* All zones in zonelist are unreclaimable? */
static bool all_unreclaimable(struct zonelist *zonelist,
                              struct scan_control *sc)
{
        struct zoneref *z;
        struct zone *zone;

        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                        gfp_zone(sc->gfp_mask), sc->nodemask) {
                if (!populated_zone(zone))
                        continue;
                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;
                if (!zone->all_unreclaimable)
                        return false;
        }

        return true;
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick the writeback threads and take explicit
 * naps in the hope that some of these pages can be written.  But if the
 * allocating task holds filesystem locks which prevent writeout this might not
 * work, and the allocation attempt will fail.
 *
 * returns:     0, if no pages reclaimed
 *              else, the number of pages reclaimed
 */
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                                          struct scan_control *sc,
                                          struct shrink_control *shrink)
{
        int priority;
        unsigned long total_scanned = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct zoneref *z;
        struct zone *zone;
        unsigned long writeback_threshold;
        bool aborted_reclaim;

        delayacct_freepages_start();

        if (global_reclaim(sc))
                count_vm_event(ALLOCSTALL);

        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token(sc->target_mem_cgroup);
                aborted_reclaim = shrink_zones(priority, zonelist, sc);

                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
                 */
                if (global_reclaim(sc)) {
                        unsigned long lru_pages = 0;
                        for_each_zone_zonelist(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask)) {
                                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                        continue;

                                lru_pages += zone_reclaimable_pages(zone);
                        }

                        shrink_slab(shrink, sc->nr_scanned, lru_pages);
                        if (reclaim_state) {
                                sc->nr_reclaimed += reclaim_state->reclaimed_slab;
                                reclaim_state->reclaimed_slab = 0;
                        }
                }
                total_scanned += sc->nr_scanned;
                if (sc->nr_reclaimed >= sc->nr_to_reclaim)
                        goto out;

                /*
                 * Try to write back as many pages as we just scanned.  This
                 * tends to cause slow streaming writers to write data to the
                 * disk smoothly, at the dirtying rate, which is nice.  But
                 * that's undesirable in laptop mode, where we *want* lumpy
                 * writeout.  So in laptop mode, write out the whole world.
                 */
                writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
                if (total_scanned > writeback_threshold) {
                        wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
                                                WB_REASON_TRY_TO_FREE_PAGES);
                        sc->may_writepage = 1;
                }

                /* Take a nap, wait for some writeback to complete */
                if (!sc->hibernation_mode && sc->nr_scanned &&
                    priority < DEF_PRIORITY - 2) {
                        struct zone *preferred_zone;

                        first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
                                                &cpuset_current_mems_allowed,
                                                &preferred_zone);
                        wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
                }
        }

out:
        delayacct_freepages_end();

        if (sc->nr_reclaimed)
                return sc->nr_reclaimed;

        /*
         * As hibernation is going on, kswapd is frozen so that it can't mark
         * the zone as all_unreclaimable.  Thus we bypass the
         * all_unreclaimable check.
         */
        if (oom_killer_disabled)
                return 0;

        /* Aborted reclaim to try compaction? don't OOM, then */
        if (aborted_reclaim)
                return 1;

        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
                return 1;

        return 0;
}

unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                gfp_t gfp_mask, nodemask_t *nodemask)
{
        unsigned long nr_reclaimed;
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .may_unmap = 1,
                .may_swap = 1,
                .order = order,
                .target_mem_cgroup = NULL,
                .nodemask = nodemask,
        };
        struct shrink_control shrink = {
                .gfp_mask = sc.gfp_mask,
        };

        trace_mm_vmscan_direct_reclaim_begin(order,
                                sc.may_writepage,
                                gfp_mask);

        nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);

        trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);

        return nr_reclaimed;
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR

unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
                                          gfp_t gfp_mask, bool noswap,
                                          struct zone *zone,
                                          unsigned long *nr_scanned)
{
        struct scan_control sc = {
                .nr_scanned = 0,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
                .may_swap = !noswap,
                .order = 0,
                .target_mem_cgroup = memcg,
        };
        struct mem_cgroup_zone mz = {
                .mem_cgroup = memcg,
                .zone = zone,
        };

        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);

        trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
                                                      sc.may_writepage,
                                                      sc.gfp_mask);

        /*
         * NOTE: Although we can get the priority field, using it
         * here is not a good idea, since it limits the pages we can scan.
         * If we don't reclaim here, the shrink_zone from balance_pgdat
         * will pick up pages from other mem cgroups as well.  We hack
         * the priority and make it zero.
         */
        shrink_mem_cgroup_zone(0, &mz, &sc);

        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);

        *nr_scanned = sc.nr_scanned;
        return sc.nr_reclaimed;
}

unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                           gfp_t gfp_mask,
                                           bool noswap)
{
        struct zonelist *zonelist;
        unsigned long nr_reclaimed;
        int nid;
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
                .may_swap = !noswap,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .order = 0,
                .target_mem_cgroup = memcg,
                .nodemask = NULL, /* we don't care about placement */
                .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                                (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
        };
        struct shrink_control shrink = {
                .gfp_mask = sc.gfp_mask,
        };

        /*
         * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
         * care from where we get pages, so the node where we start the
         * scan does not need to be the current node.
         */
        nid = mem_cgroup_select_victim_node(memcg);

        zonelist = NODE_DATA(nid)->node_zonelists;

        trace_mm_vmscan_memcg_reclaim_begin(0,
                                            sc.may_writepage,
                                            sc.gfp_mask);

        nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);

        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);

        return nr_reclaimed;
}
#endif

static void age_active_anon(struct zone *zone, struct scan_control *sc,
                            int priority)
{
        struct mem_cgroup *memcg;

        if (!total_swap_pages)
                return;

        memcg = mem_cgroup_iter(NULL, NULL, NULL);
        do {
                struct mem_cgroup_zone mz = {
                        .mem_cgroup = memcg,
                        .zone = zone,
                };

                if (inactive_anon_is_low(&mz))
                        shrink_active_list(SWAP_CLUSTER_MAX, &mz,
                                           sc, priority, 0);

                memcg = mem_cgroup_iter(NULL, memcg, NULL);
        } while (memcg);
}

/*
 * pgdat_balanced is used when checking if a node is balanced for high-order
 * allocations.  Only zones that meet watermarks and are in a zone allowed
 * by the callers classzone_idx are added to balanced_pages.  The total of
 * balanced pages must be at least 25% of the zones allowed by classzone_idx
 * for the node to be considered balanced.  Forcing all zones to be balanced
 * for high orders can cause excessive reclaim when there are imbalanced zones.
 * The choice of 25% is due to
 *   o a 16M DMA zone that is balanced will not balance a node on any
 *     reasonable sized machine
 *   o On all other machines, the top zone must be at least a reasonable
 *     percentage of the middle zones.  For example, on 32-bit x86, highmem
 *     would need to be at least 256M for it to balance a whole node.
 *     Similarly, on x86-64 the Normal zone would need to be at least 1G
 *     to balance a node on its own.  These seemed like reasonable ratios.
 */
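/*
 * Illustrative numbers: on a node with 16M DMA, 3G DMA32 and 1G Normal
 * zones, with classzone_idx covering all three, present_pages is roughly
 * 4G worth of pages, so balanced zones totalling at least ~1G of pages
 * (present_pages >> 2) are enough for the node to count as balanced.
 */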
static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
                           int classzone_idx)
{
        unsigned long present_pages = 0;
        int i;

        for (i = 0; i <= classzone_idx; i++)
                present_pages += pgdat->node_zones[i].present_pages;

        /* A special case here: if zone has no page, we think it's balanced */
        return balanced_pages >= (present_pages >> 2);
}

/* is kswapd sleeping prematurely? */
static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
                                 int classzone_idx)
{
        int i;
        unsigned long balanced = 0;
        bool all_zones_ok = true;

        /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
        if (remaining)
                return true;

        /* Check the watermark levels */
        for (i = 0; i <= classzone_idx; i++) {
                struct zone *zone = pgdat->node_zones + i;

                if (!populated_zone(zone))
                        continue;

                /*
                 * balance_pgdat() skips over all_unreclaimable after
                 * DEF_PRIORITY.  Effectively, it considers them balanced so
                 * they must be considered balanced here as well if kswapd
                 * is to sleep
                 */
                if (zone->all_unreclaimable) {
                        balanced += zone->present_pages;
                        continue;
                }

                if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
                                            i, 0))
                        all_zones_ok = false;
                else
                        balanced += zone->present_pages;
        }

        /*
         * For high-order requests, the balanced zones must contain at least
         * 25% of the node's pages for kswapd to sleep.  For order-0, all
         * zones must be balanced.
         */
        if (order)
                return !pgdat_balanced(pgdat, balanced, classzone_idx);
        else
                return !all_zones_ok;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at high_wmark_pages(zone).
 *
 * Returns the final order kswapd was reclaiming at
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
 * lower zones regardless of the number of free pages in the lower zones.  This
 * interoperates with the page allocator fallback scheme to ensure that aging
 * of pages is balanced across the zones.
 */
static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                                   int *classzone_idx)
{
        int all_zones_ok;
        unsigned long balanced;
        int priority;
        int i;
        int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
        unsigned long total_scanned;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        unsigned long nr_soft_reclaimed;
        unsigned long nr_soft_scanned;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_unmap = 1,
                .may_swap = 1,
                /*
                 * kswapd doesn't want to be bailed out while reclaiming,
                 * because we want to put equal scanning pressure on each zone.
                 */
                .nr_to_reclaim = ULONG_MAX,
                .order = order,
                .target_mem_cgroup = NULL,
        };
        struct shrink_control shrink = {
                .gfp_mask = sc.gfp_mask,
        };
loop_again:
        total_scanned = 0;
        sc.nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
        count_vm_event(PAGEOUTRUN);

        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                unsigned long lru_pages = 0;
                int has_under_min_watermark_zone = 0;

                /* The swap token gets in the way of swapout... */
                if (!priority)
                        disable_swap_token(NULL);

                all_zones_ok = 1;
                balanced = 0;

                /*
                 * Scan in the highmem->dma direction for the highest
                 * zone which needs scanning
                 */
                for (i = pgdat->nr_zones - 1; i >= 0; i--) {
                        struct zone *zone = pgdat->node_zones + i;

                        if (!populated_zone(zone))
                                continue;

                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;

                        /*
                         * Do some background aging of the anon list, to give
                         * pages a chance to be referenced before reclaiming.
                         */
                        age_active_anon(zone, &sc, priority);

                        /*
                         * If the number of buffer_heads in the machine
                         * exceeds the maximum allowed level and this node
                         * has a highmem zone, force kswapd to reclaim from
                         * it to relieve lowmem pressure.
                         */
                        if (buffer_heads_over_limit && is_highmem_idx(i)) {
                                end_zone = i;
                                break;
                        }

                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
                                break;
                        } else {
                                /* If balanced, clear the congested flag */
                                zone_clear_flag(zone, ZONE_CONGESTED);
                        }
                }
                if (i < 0)
                        goto out;

                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;

                        lru_pages += zone_reclaimable_pages(zone);
                }

                /*
                 * Now scan the zone in the dma->highmem direction, stopping
                 * at the last zone which needs scanning.
                 *
                 * We do this because the page allocator works in the opposite
                 * direction.  This prevents the page allocator from allocating
                 * pages behind kswapd's direction of progress, which would
                 * cause too much scanning of the lower zones.
                 */
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab, testorder;
                        unsigned long balance_gap;

                        if (!populated_zone(zone))
                                continue;

                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;

                        sc.nr_scanned = 0;

                        nr_soft_scanned = 0;
                        /*
                         * Call soft limit reclaim before calling shrink_zone.
                         */
                        nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
                                                        order, sc.gfp_mask,
                                                        &nr_soft_scanned);
                        sc.nr_reclaimed += nr_soft_reclaimed;
                        total_scanned += nr_soft_scanned;

                        /*
                         * We put equal pressure on every zone, unless
                         * one zone has way too many pages free
                         * already.  The "too many pages" is defined
                         * as the high wmark plus a "gap" where the
                         * gap is either the low watermark or 1%
                         * of the zone, whichever is smaller.
                         */
                        balance_gap = min(low_wmark_pages(zone),
                                (zone->present_pages +
                                        KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
                                KSWAPD_ZONE_BALANCE_GAP_RATIO);
                        /*
                         * Kswapd reclaims only single pages with compaction
                         * enabled.  Trying too hard to reclaim until contiguous
                         * free pages have become available can hurt performance
                         * by evicting too much useful data from memory.
                         * Do not reclaim more than needed for compaction.
                         */
                        testorder = order;
                        if (COMPACTION_BUILD && order &&
                                        compaction_suitable(zone, order) !=
                                                COMPACT_SKIPPED)
                                testorder = 0;

                        if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
                                    !zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone) + balance_gap,
                                        end_zone, 0)) {
                                shrink_zone(priority, zone, &sc);

                                reclaim_state->reclaimed_slab = 0;
                                nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
                                sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                                total_scanned += sc.nr_scanned;

                                if (nr_slab == 0 && !zone_reclaimable(zone))
                                        zone->all_unreclaimable = 1;
                        }

                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
                         * even in laptop mode
                         */
                        if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
                            total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                sc.may_writepage = 1;

                        if (zone->all_unreclaimable) {
                                if (end_zone && end_zone == i)
                                        end_zone--;
                                continue;
                        }

                        if (!zone_watermark_ok_safe(zone, testorder,
                                        high_wmark_pages(zone), end_zone, 0)) {
                                all_zones_ok = 0;
                                /*
                                 * We are still under the min watermark.  This
                                 * means that we have a GFP_ATOMIC allocation
                                 * failure risk.  Hurry up!
                                 */
                                if (!zone_watermark_ok_safe(zone, order,
                                            min_wmark_pages(zone), end_zone, 0))
                                        has_under_min_watermark_zone = 1;
                        } else {
                                /*
                                 * If a zone reaches its high watermark,
                                 * consider it to be no longer congested.  It's
                                 * possible there are dirty pages backed by
                                 * congested BDIs, but as pressure is relieved,
                                 * speculatively avoid congestion waits.
                                 */
                                zone_clear_flag(zone, ZONE_CONGESTED);
                                if (i <= *classzone_idx)
                                        balanced += zone->present_pages;
                        }

                }
                if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
                        break;          /* kswapd: all done */
                /*
                 * OK, kswapd is getting into trouble.  Take a nap, then take
                 * another pass across the zones.
                 */
                if (total_scanned && (priority < DEF_PRIORITY - 2)) {
                        if (has_under_min_watermark_zone)
                                count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
                        else
                                congestion_wait(BLK_RW_ASYNC, HZ/10);
                }

                /*
                 * We do this so kswapd doesn't build up large priorities for
                 * example when it is freeing in parallel with allocators.  It
                 * matches the direct reclaim path behaviour in terms of impact
                 * on zone->*_priority.
                 */
                if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
                        break;
        }
out:

        /*
         * order-0: All zones must meet high watermark for a balanced node
         * high-order: Balanced zones must make up at least 25% of the node
         *             for the node to be balanced
         */
        if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
                cond_resched();

                try_to_freeze();

                /*
                 * Fragmentation may mean that the system cannot be
                 * rebalanced for high-order allocations in all zones.
                 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
                 * it means the zones have been fully scanned and are still
                 * not balanced.  For high-order allocations, there is
                 * little point trying all over again as kswapd may
                 * loop infinitely.
                 *
                 * Instead, recheck all watermarks at order-0 as they
                 * are the most important.  If watermarks are ok, kswapd will go
                 * back to sleep.  High-order users can still perform direct
                 * reclaim if they wish.
                 */
                if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
                        order = sc.order = 0;

                goto loop_again;
        }

        /*
         * If kswapd was reclaiming at a higher order, it has the option of
         * sleeping without all zones being balanced.  Before it does, it must
         * ensure that the watermarks for order-0 on *all* zones are met and
         * that the congestion flags are cleared.  The congestion flag must
         * be cleared as kswapd is the only mechanism that clears the flag
         * and it is potentially going to sleep here.
         */
        if (order) {
                int zones_need_compaction = 1;

                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;

                        if (!populated_zone(zone))
                                continue;

                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;

                        /* Would compaction fail due to lack of free memory? */
                        if (compaction_suitable(zone, order) == COMPACT_SKIPPED)
                                goto loop_again;

                        /* Confirm the zone is balanced for order-0 */
                        if (!zone_watermark_ok(zone, 0,
                                        high_wmark_pages(zone), 0, 0)) {
                                order = sc.order = 0;
                                goto loop_again;
                        }

                        /* Check if the memory needs to be defragmented. */
                        if (zone_watermark_ok(zone, order,
                                    low_wmark_pages(zone), *classzone_idx, 0))
                                zones_need_compaction = 0;

                        /* If balanced, clear the congested flag */
                        zone_clear_flag(zone, ZONE_CONGESTED);
                }

                if (zones_need_compaction)
                        compact_pgdat(pgdat, order);
        }

        /*
         * Return the order we were reclaiming at so sleeping_prematurely()
         * makes a decision on the order we were last reclaiming at.  However,
         * if another caller entered the allocator slow path while kswapd
         * was awake, order will remain at the higher level.
         */
        *classzone_idx = end_zone;
        return order;
}
  2602. static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
  2603. {
  2604. long remaining = 0;
  2605. DEFINE_WAIT(wait);
  2606. if (freezing(current) || kthread_should_stop())
  2607. return;
  2608. prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
  2609. /* Try to sleep for a short interval */
  2610. if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
  2611. remaining = schedule_timeout(HZ/10);
  2612. finish_wait(&pgdat->kswapd_wait, &wait);
  2613. prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
  2614. }
  2615. /*
  2616. * After a short sleep, check if it was a premature sleep. If not, then
  2617. * go fully to sleep until explicitly woken up.
  2618. */
  2619. if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
  2620. trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
  2621. /*
  2622. * vmstat counters are not perfectly accurate and the estimated
  2623. * value for counters such as NR_FREE_PAGES can deviate from the
  2624. * true value by nr_online_cpus * threshold. To avoid the zone
  2625. * watermarks being breached while under pressure, we reduce the
  2626. * per-cpu vmstat threshold while kswapd is awake and restore
  2627. * them before going back to sleep.
  2628. */
  2629. set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
  2630. schedule();
  2631. set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
  2632. } else {
  2633. if (remaining)
  2634. count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
  2635. else
  2636. count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
  2637. }
  2638. finish_wait(&pgdat->kswapd_wait, &wait);
  2639. }
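
/*
 * Illustrative sketch (not part of this file): the worst-case drift the
 * vmstat comment in kswapd_try_to_sleep() describes. The figures in the
 * comment below are assumptions for the example, not kernel defaults.
 */
static unsigned long vmstat_worst_case_drift_sketch(int nr_online_cpus,
						    int threshold)
{
	/* Each CPU may hold up to 'threshold' uncommitted stat events */
	return (unsigned long)nr_online_cpus * threshold;
	/* e.g. 16 CPUs * 32 = 512 pages, i.e. 2MB with 4KB pages */
}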

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned long order, new_order;
	unsigned balanced_order;
	int classzone_idx, new_classzone_idx;
	int balanced_classzone_idx;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;

	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	lockdep_set_current_reclaim_state(GFP_KERNEL);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	order = new_order = 0;
	balanced_order = 0;
	classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
	balanced_classzone_idx = classzone_idx;
	for ( ; ; ) {
		int ret;

		/*
		 * If the last balance_pgdat was unsuccessful it's unlikely a
		 * new request of a similar or harder type will succeed soon,
		 * so consider going to sleep on the basis of the order we
		 * reclaimed at.
		 */
		if (balanced_classzone_idx >= new_classzone_idx &&
					balanced_order == new_order) {
			new_order = pgdat->kswapd_max_order;
			new_classzone_idx = pgdat->classzone_idx;
			pgdat->kswapd_max_order = 0;
			pgdat->classzone_idx = pgdat->nr_zones - 1;
		}

		if (order < new_order || classzone_idx > new_classzone_idx) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation or has tighter zone constraints
			 */
			order = new_order;
			classzone_idx = new_classzone_idx;
		} else {
			kswapd_try_to_sleep(pgdat, balanced_order,
						balanced_classzone_idx);
			order = pgdat->kswapd_max_order;
			classzone_idx = pgdat->classzone_idx;
			new_order = order;
			new_classzone_idx = classzone_idx;
			pgdat->kswapd_max_order = 0;
			pgdat->classzone_idx = pgdat->nr_zones - 1;
		}

		ret = try_to_freeze();
		if (kthread_should_stop())
			break;

		/*
		 * We can speed up thawing tasks if we don't call balance_pgdat
		 * after returning from the refrigerator
		 */
		if (!ret) {
			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
			balanced_classzone_idx = classzone_idx;
			balanced_order = balance_pgdat(pgdat, order,
						&balanced_classzone_idx);
		}
	}
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
		return;
	pgdat = zone->zone_pgdat;
	if (pgdat->kswapd_max_order < order) {
		pgdat->kswapd_max_order = order;
		pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
	}
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
		return;

	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
	wake_up_interruptible(&pgdat->kswapd_wait);
}
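
/*
 * Illustrative sketch (not part of this file): how the page allocator's
 * slow path might wake kswapd for every eligible zone in a zonelist.
 * The helper name is hypothetical; the zonelist iterator and
 * wakeup_kswapd() above are the real interfaces used.
 */
static void wake_kswapds_sketch(unsigned int order, struct zonelist *zonelist,
				enum zone_type high_zoneidx,
				enum zone_type classzone_idx)
{
	struct zoneref *z;
	struct zone *zone;

	/* Wake kswapd on each node backing an allowed zone */
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		wakeup_kswapd(zone, order, classzone_idx);
}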

/*
 * The reclaimable count is mostly accurate. The pages that are less
 * readily reclaimable are
 * - mlocked pages, which will be moved to the unevictable list when
 *   encountered
 * - mapped pages, which may require several passes to be reclaimed
 * - dirty pages, which are not "instantly" reclaimable
 */
unsigned long global_reclaimable_pages(void)
{
	int nr;

	nr = global_page_state(NR_ACTIVE_FILE) +
	     global_page_state(NR_INACTIVE_FILE);

	if (nr_swap_pages > 0)
		nr += global_page_state(NR_ACTIVE_ANON) +
		      global_page_state(NR_INACTIVE_ANON);

	return nr;
}

unsigned long zone_reclaimable_pages(struct zone *zone)
{
	int nr;

	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
	     zone_page_state(zone, NR_INACTIVE_FILE);

	if (nr_swap_pages > 0)
		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
		      zone_page_state(zone, NR_INACTIVE_ANON);

	return nr;
}
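
/*
 * Illustrative sketch (not part of this file): a caller could combine
 * global_reclaimable_pages() with the global totalram_pages counter to
 * estimate how much of RAM is still reclaimable. The helper is
 * hypothetical and for demonstration only.
 */
static unsigned long reclaimable_percent_sketch(void)
{
	return global_reclaimable_pages() * 100 / totalram_pages;
}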

#ifdef CONFIG_HIBERNATION
/*
 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number
 * of freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_HIGHUSER_MOVABLE,
		.may_swap = 1,
		.may_unmap = 1,
		.may_writepage = 1,
		.nr_to_reclaim = nr_to_reclaim,
		.hibernation_mode = 1,
		.order = 0,
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
	struct task_struct *p = current;
	unsigned long nr_reclaimed;

	p->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(sc.gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);

	p->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	p->flags &= ~PF_MEMALLOC;

	return nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */
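
/*
 * Illustrative sketch (not part of this file): the hibernation core frees
 * room for the snapshot image in this spirit, asking shrink_all_memory()
 * for the shortfall and noting how much was actually reclaimed. The helper
 * name and message are hypothetical.
 */
#ifdef CONFIG_HIBERNATION
static void hibernate_make_room_sketch(unsigned long shortfall)
{
	unsigned long freed = shrink_all_memory(shortfall);

	printk(KERN_INFO "PM: freed %lu pages for the image\n", freed);
}
#endif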

/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness. So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_HIGH_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			const struct cpumask *mask;

			mask = cpumask_of_node(pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}

/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state == SYSTEM_BOOTING);
		printk("Failed to start kswapd on node %d\n", nid);
		ret = -1;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined.
 */
void kswapd_stop(int nid)
{
	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;

	if (kswapd)
		kthread_stop(kswapd);
}

static int __init kswapd_init(void)
{
	int nid;

	swap_setup();
	for_each_node_state(nid, N_HIGH_MEMORY)
		kswapd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim() when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of a zone's
 * pages considered on each zone_reclaim pass. Priority 4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4
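
/*
 * Illustrative sketch (not part of this file): why priority 4 means
 * 1/16th. The scan window shrink_zone() works from is roughly
 * lru_pages >> priority, so each drop in priority doubles the window.
 * The helper is hypothetical and for demonstration only.
 */
static inline unsigned long zone_reclaim_scan_window_sketch(unsigned long lru_pages)
{
	/* 4 -> lru_pages / 2^4 == 1/16th of the zone's LRU pages */
	return lru_pages >> ZONE_RECLAIM_PRIORITY;
}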

/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
		zone_page_state(zone, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static long zone_pagecache_reclaimable(struct zone *zone)
{
	long nr_pagecache_reclaimable;
	long delta = 0;

	/*
	 * If RECLAIM_SWAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache and zone_unmapped_file_pages() provides
	 * a better estimate
	 */
	if (zone_reclaim_mode & RECLAIM_SWAP)
		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(zone_reclaim_mode & RECLAIM_WRITE))
		delta += zone_page_state(zone, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}
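
/*
 * Worked example for the function above (illustrative figures, not from
 * this file): with RECLAIM_WRITE clear, 500 unmapped file pages on the
 * zone and 100 of them dirty, the result is 500 - 100 = 400: dirty pages
 * cannot be reclaimed without writeback, so they drop out of the estimate.
 */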

/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	int priority;
	struct scan_control sc = {
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.may_swap = 1,
		.nr_to_reclaim = max_t(unsigned long, nr_pages,
				       SWAP_CLUSTER_MAX),
		.gfp_mask = gfp_mask,
		.order = order,
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};
	unsigned long nr_slab_pages0, nr_slab_pages1;

	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink_zone() with increasing
		 * priorities until we have enough memory freed.
		 */
		priority = ZONE_RECLAIM_PRIORITY;
		do {
			shrink_zone(priority, zone, &sc);
			priority--;
		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
	}

	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	if (nr_slab_pages0 > zone->min_slab_pages) {
		/*
		 * shrink_slab() does not currently allow us to determine how
		 * many pages were freed in this zone. So we take the current
		 * number of slab pages and shake the slab until it is reduced
		 * by the same nr_pages that we used for reclaiming unmapped
		 * pages.
		 *
		 * Note that shrink_slab will free memory on all zones and may
		 * take a long time.
		 */
		for (;;) {
			unsigned long lru_pages = zone_reclaimable_pages(zone);

			/* No reclaimable slab or very low memory pressure */
			if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
				break;

			/* Freed enough memory */
			nr_slab_pages1 = zone_page_state(zone,
							NR_SLAB_RECLAIMABLE);
			if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
				break;
		}

		/*
		 * Update nr_reclaimed by the number of slab pages we
		 * reclaimed from this zone.
		 */
		nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
		if (nr_slab_pages1 < nr_slab_pages0)
			sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
	lockdep_clear_current_reclaim_state();
	return sc.nr_reclaimed >= nr_pages;
}

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int node_id;
	int ret;

	/*
	 * Zone reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the zone is overallocated. So we do not reclaim
	 * if less than a specified percentage of the zone is used by
	 * unmapped file backed pages.
	 */
	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
		return ZONE_RECLAIM_FULL;

	if (zone->all_unreclaimable)
		return ZONE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
		return ZONE_RECLAIM_NOSCAN;

	/*
	 * Only run zone reclaim on the local zone or on zones that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as widely as possible.
	 */
	node_id = zone_to_nid(zone);
	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
		return ZONE_RECLAIM_NOSCAN;

	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return ZONE_RECLAIM_NOSCAN;

	ret = __zone_reclaim(zone, gfp_mask, order);
	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
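
/*
 * Illustrative sketch (not part of this file): how an allocator-side
 * caller might act on zone_reclaim()'s return value before retrying the
 * watermark check. The helper is hypothetical; the ZONE_RECLAIM_* codes
 * and zone_watermark_ok() are the real interfaces.
 */
static bool try_zone_reclaim_sketch(struct zone *zone, gfp_t gfp_mask,
				    unsigned int order, unsigned long mark,
				    int classzone_idx)
{
	switch (zone_reclaim(zone, gfp_mask, order)) {
	case ZONE_RECLAIM_NOSCAN:	/* did not scan: try another zone */
	case ZONE_RECLAIM_FULL:		/* scanned but nothing reclaimable */
		return false;
	default:
		/* Some progress: recheck the watermark before allocating */
		return zone_watermark_ok(zone, order, mark, classzone_idx, 0);
	}
}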
#endif

/*
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 * @vma: the VMA in which the page is or will be mapped, may be NULL
 *
 * Test whether page is evictable--i.e., should be placed on the
 * active/inactive lists vs the unevictable list. The vma argument is
 * !NULL when called from the fault path to determine how to instantiate
 * a new page.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
int page_evictable(struct page *page, struct vm_area_struct *vma)
{
	if (mapping_unevictable(page_mapping(page)))
		return 0;

	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
		return 0;

	return 1;
}
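
/*
 * Illustrative sketch (not part of this file): reclaim-side culling calls
 * page_evictable() with a NULL vma, while fault-path callers pass the vma
 * so mlocked VMAs are caught before the page is mapped. The helper is
 * hypothetical and for demonstration only.
 */
static bool cull_if_unevictable_sketch(struct page *page)
{
	if (page_evictable(page, NULL))
		return false;	/* keep on the active/inactive lists */

	/* move to the unevictable list instead of trying to reclaim */
	return true;
}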

#ifdef CONFIG_SHMEM
/**
 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
 * @pages:	array of pages to check
 * @nr_pages:	number of pages to check
 *
 * Checks pages for evictability and moves them to the appropriate lru list.
 *
 * This function is only used for SysV IPC SHM_UNLOCK.
 */
void check_move_unevictable_pages(struct page **pages, int nr_pages)
{
	struct lruvec *lruvec;
	struct zone *zone = NULL;
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pages[i];
		struct zone *pagezone;

		pgscanned++;
		pagezone = page_zone(page);
		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}

		if (!PageLRU(page) || !PageUnevictable(page))
			continue;

		if (page_evictable(page, NULL)) {
			enum lru_list lru = page_lru_base_type(page);

			VM_BUG_ON(PageActive(page));
			ClearPageUnevictable(page);
			__dec_zone_state(zone, NR_UNEVICTABLE);
			lruvec = mem_cgroup_lru_move_lists(zone, page,
						LRU_UNEVICTABLE, lru);
			list_move(&page->lru, &lruvec->lists[lru]);
			__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
			pgrescued++;
		}
	}

	if (zone) {
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		spin_unlock_irq(&zone->lru_lock);
	}
}
#endif /* CONFIG_SHMEM */
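
/*
 * Illustrative sketch (not part of this file): the SHM_UNLOCK path walks
 * a mapping in batches and hands each batch to
 * check_move_unevictable_pages(), roughly in this spirit. The helper is
 * hypothetical; pagevec_lookup() and pagevec_release() are the real
 * batching interfaces.
 */
#ifdef CONFIG_SHMEM
static void unlock_mapping_sketch(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		/* resume the scan after the last page in this batch */
		index = pvec.pages[pvec.nr - 1]->index + 1;
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}
#endif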

static void warn_scan_unevictable_pages(void)
{
	printk_once(KERN_WARNING
		    "%s: The scan_unevictable_pages sysctl/node-interface has been "
		    "disabled for lack of a legitimate use case. If you have "
		    "one, please send an email to linux-mm@kvack.org.\n",
		    current->comm);
}

/*
 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
 * all nodes' unevictable lists for evictable pages
 */
unsigned long scan_unevictable_pages;

int scan_unevictable_handler(struct ctl_table *table, int write,
			     void __user *buffer,
			     size_t *length, loff_t *ppos)
{
	warn_scan_unevictable_pages();
	proc_doulongvec_minmax(table, write, buffer, length, ppos);
	scan_unevictable_pages = 0;
	return 0;
}

#ifdef CONFIG_NUMA
/*
 * per node 'scan_unevictable_pages' attribute. On demand re-scan of
 * a specified node's per zone unevictable lists for evictable pages.
 */

static ssize_t read_scan_unevictable_node(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	warn_scan_unevictable_pages();
	return sprintf(buf, "0\n");	/* always zero; should fit... */
}

static ssize_t write_scan_unevictable_node(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	warn_scan_unevictable_pages();
	return 1;
}

static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
		   read_scan_unevictable_node,
		   write_scan_unevictable_node);

int scan_unevictable_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
}

void scan_unevictable_unregister_node(struct node *node)
{
	device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
}
#endif