vmscan.c

  1. /*
  2. * linux/mm/vmscan.c
  3. *
  4. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  5. *
  6. * Swap reorganised 29.12.95, Stephen Tweedie.
  7. * kswapd added: 7.1.96 sct
  8. * Removed kswapd_ctl limits, and swap out as many pages as needed
  9. * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
  10. * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
  11. * Multiqueue VM started 5.8.00, Rik van Riel.
  12. */
  13. #include <linux/mm.h>
  14. #include <linux/module.h>
  15. #include <linux/gfp.h>
  16. #include <linux/kernel_stat.h>
  17. #include <linux/swap.h>
  18. #include <linux/pagemap.h>
  19. #include <linux/init.h>
  20. #include <linux/highmem.h>
  21. #include <linux/vmpressure.h>
  22. #include <linux/vmstat.h>
  23. #include <linux/file.h>
  24. #include <linux/writeback.h>
  25. #include <linux/blkdev.h>
  26. #include <linux/buffer_head.h> /* for try_to_release_page(),
  27. buffer_heads_over_limit */
  28. #include <linux/mm_inline.h>
  29. #include <linux/backing-dev.h>
  30. #include <linux/rmap.h>
  31. #include <linux/topology.h>
  32. #include <linux/cpu.h>
  33. #include <linux/cpuset.h>
  34. #include <linux/compaction.h>
  35. #include <linux/notifier.h>
  36. #include <linux/rwsem.h>
  37. #include <linux/delay.h>
  38. #include <linux/kthread.h>
  39. #include <linux/freezer.h>
  40. #include <linux/memcontrol.h>
  41. #include <linux/delayacct.h>
  42. #include <linux/sysctl.h>
  43. #include <linux/oom.h>
  44. #include <linux/prefetch.h>
  45. #include <asm/tlbflush.h>
  46. #include <asm/div64.h>
  47. #include <linux/swapops.h>
  48. #include "internal.h"
  49. #define CREATE_TRACE_POINTS
  50. #include <trace/events/vmscan.h>
  51. struct scan_control {
  52. /* Incremented by the number of inactive pages that were scanned */
  53. unsigned long nr_scanned;
  54. /* Number of pages freed so far during a call to shrink_zones() */
  55. unsigned long nr_reclaimed;
  56. /* How many pages shrink_list() should reclaim */
  57. unsigned long nr_to_reclaim;
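/* Set when reclaiming for hibernation via shrink_all_memory() */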
  58. unsigned long hibernation_mode;
  59. /* This context's GFP mask */
  60. gfp_t gfp_mask;
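/* Can dirty pages be written back to disk as part of this reclaim? */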
  61. int may_writepage;
  62. /* Can mapped pages be reclaimed? */
  63. int may_unmap;
  64. /* Can pages be swapped as part of reclaim? */
  65. int may_swap;
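/* Order of the allocation request that triggered this reclaim */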
  66. int order;
  67. /* Scan (total_size >> priority) pages at once */
  68. int priority;
  69. /*
  70. * The memory cgroup that hit its limit and as a result is the
  71. * primary target of this reclaim invocation.
  72. */
  73. struct mem_cgroup *target_mem_cgroup;
  74. /*
  75. * Nodemask of nodes allowed by the caller. If NULL, all nodes
  76. * are scanned.
  77. */
  78. nodemask_t *nodemask;
  79. };
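/* lru_to_page() picks the page at the tail of the list, i.e. the oldest entry on an LRU */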
  80. #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
  81. #ifdef ARCH_HAS_PREFETCH
  82. #define prefetch_prev_lru_page(_page, _base, _field) \
  83. do { \
  84. if ((_page)->lru.prev != _base) { \
  85. struct page *prev; \
  86. \
  87. prev = lru_to_page(&(_page->lru)); \
  88. prefetch(&prev->_field); \
  89. } \
  90. } while (0)
  91. #else
  92. #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
  93. #endif
  94. #ifdef ARCH_HAS_PREFETCHW
  95. #define prefetchw_prev_lru_page(_page, _base, _field) \
  96. do { \
  97. if ((_page)->lru.prev != _base) { \
  98. struct page *prev; \
  99. \
  100. prev = lru_to_page(&(_page->lru)); \
  101. prefetchw(&prev->_field); \
  102. } \
  103. } while (0)
  104. #else
  105. #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
  106. #endif
  107. /*
  108. * From 0 .. 100. Higher means more swappy.
  109. */
  110. int vm_swappiness = 60;
  111. unsigned long vm_total_pages; /* The total number of pages which the VM controls */
  112. static LIST_HEAD(shrinker_list);
  113. static DECLARE_RWSEM(shrinker_rwsem);
  114. #ifdef CONFIG_MEMCG
  115. static bool global_reclaim(struct scan_control *sc)
  116. {
  117. return !sc->target_mem_cgroup;
  118. }
  119. #else
  120. static bool global_reclaim(struct scan_control *sc)
  121. {
  122. return true;
  123. }
  124. #endif
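/* Number of pages on @lru in @lruvec, using the memcg LRU size when memcg is enabled */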
  125. static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
  126. {
  127. if (!mem_cgroup_disabled())
  128. return mem_cgroup_get_lru_size(lruvec, lru);
  129. return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
  130. }
  131. /*
  132. * Add a shrinker callback to be called from the vm
  133. */
  134. void register_shrinker(struct shrinker *shrinker)
  135. {
  136. atomic_long_set(&shrinker->nr_in_batch, 0);
  137. down_write(&shrinker_rwsem);
  138. list_add_tail(&shrinker->list, &shrinker_list);
  139. up_write(&shrinker_rwsem);
  140. }
  141. EXPORT_SYMBOL(register_shrinker);
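/*
 * Minimal usage sketch (hypothetical my_cache_* helpers, not part of this
 * file). With the legacy ->shrink() interface driven by do_shrinker_shrink()
 * below, the callback frees up to sc->nr_to_scan objects when asked and
 * returns the number of objects remaining in the cache:
 *
 *	static int my_cache_shrink(struct shrinker *s, struct shrink_control *sc)
 *	{
 *		if (sc->nr_to_scan)
 *			my_cache_free(sc->nr_to_scan);
 *		return my_cache_count();
 *	}
 *
 *	static struct shrinker my_cache_shrinker = {
 *		.shrink = my_cache_shrink,
 *		.seeks  = DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&my_cache_shrinker);
 */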
  142. /*
  143. * Remove one
  144. */
  145. void unregister_shrinker(struct shrinker *shrinker)
  146. {
  147. down_write(&shrinker_rwsem);
  148. list_del(&shrinker->list);
  149. up_write(&shrinker_rwsem);
  150. }
  151. EXPORT_SYMBOL(unregister_shrinker);
  152. static inline int do_shrinker_shrink(struct shrinker *shrinker,
  153. struct shrink_control *sc,
  154. unsigned long nr_to_scan)
  155. {
  156. sc->nr_to_scan = nr_to_scan;
  157. return (*shrinker->shrink)(shrinker, sc);
  158. }
  159. #define SHRINK_BATCH 128
  160. /*
  161. * Call the shrink functions to age shrinkable caches
  162. *
  163. * Here we assume it costs one seek to replace a lru page and that it also
  164. * takes a seek to recreate a cache object. With this in mind we age equal
  165. * percentages of the lru and ageable caches. This should balance the seeks
  166. * generated by these structures.
  167. *
  168. * If the vm encountered mapped pages on the LRU it increases the pressure on
  169. * slab to avoid swapping.
  170. *
  171. * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
  172. *
  173. * `lru_pages' represents the number of on-LRU pages in all the zones which
  174. * are eligible for the caller's allocation attempt. It is used for balancing
  175. * slab reclaim versus page reclaim.
  176. *
  177. * Returns the number of slab objects which we shrunk.
  178. */
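/*
 * Rough sketch of the scan-target arithmetic below: each shrinker is asked
 * to scan
 *
 *	delta = (4 * nr_pages_scanned / shrinker->seeks) * max_pass / (lru_pages + 1)
 *
 * objects, i.e. the same fraction of its cache as the fraction of LRU pages
 * just scanned, weighted by its seek cost. With the common DEFAULT_SEEKS
 * value of 2, scanning 1% of lru_pages asks a shrinker to scan roughly 2%
 * of its max_pass objects.
 */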
  179. unsigned long shrink_slab(struct shrink_control *shrinkctl,
  180. unsigned long nr_pages_scanned,
  181. unsigned long lru_pages)
  182. {
  183. struct shrinker *shrinker;
  184. unsigned long freed = 0;
  185. if (nr_pages_scanned == 0)
  186. nr_pages_scanned = SWAP_CLUSTER_MAX;
  187. if (!down_read_trylock(&shrinker_rwsem)) {
  188. /*
  189. * If we would return 0, our callers would understand that we
  190. * have nothing else to shrink and give up trying. By returning
  191. * 1 we keep it going and assume we'll be able to shrink next
  192. * time.
  193. */
  194. freed = 1;
  195. goto out;
  196. }
  197. list_for_each_entry(shrinker, &shrinker_list, list) {
  198. unsigned long long delta;
  199. long total_scan;
  200. long max_pass;
  201. long nr;
  202. long new_nr;
  203. long batch_size = shrinker->batch ? shrinker->batch
  204. : SHRINK_BATCH;
  205. if (shrinker->count_objects)
  206. max_pass = shrinker->count_objects(shrinker, shrinkctl);
  207. else
  208. max_pass = do_shrinker_shrink(shrinker, shrinkctl, 0);
  209. if (max_pass == 0)
  210. continue;
  211. /*
  212. * copy the current shrinker scan count into a local variable
  213. * and zero it so that other concurrent shrinker invocations
  214. * don't also do this scanning work.
  215. */
  216. nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
  217. total_scan = nr;
  218. delta = (4 * nr_pages_scanned) / shrinker->seeks;
  219. delta *= max_pass;
  220. do_div(delta, lru_pages + 1);
  221. total_scan += delta;
  222. if (total_scan < 0) {
  223. printk(KERN_ERR
  224. "shrink_slab: %pF negative objects to delete nr=%ld\n",
  225. shrinker->shrink, total_scan);
  226. total_scan = max_pass;
  227. }
  228. /*
  229. * We need to avoid excessive windup on filesystem shrinkers
  230. * due to large numbers of GFP_NOFS allocations causing the
  231. * shrinkers to return -1 all the time. This results in a large
  232. * nr being built up so when a shrink that can do some work
  233. * comes along it empties the entire cache due to nr >>>
  234. * max_pass. This is bad for sustaining a working set in
  235. * memory.
  236. *
  237. * Hence only allow the shrinker to scan the entire cache when
  238. * a large delta change is calculated directly.
  239. */
  240. if (delta < max_pass / 4)
  241. total_scan = min(total_scan, max_pass / 2);
  242. /*
  243. * Avoid risking looping forever due to a too-large nr value:
  244. * never try to free more than twice the estimated number of
  245. * freeable entries.
  246. */
  247. if (total_scan > max_pass * 2)
  248. total_scan = max_pass * 2;
  249. trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
  250. nr_pages_scanned, lru_pages,
  251. max_pass, delta, total_scan);
  252. while (total_scan >= batch_size) {
  253. if (shrinker->scan_objects) {
  254. unsigned long ret;
  255. shrinkctl->nr_to_scan = batch_size;
  256. ret = shrinker->scan_objects(shrinker, shrinkctl);
  257. if (ret == SHRINK_STOP)
  258. break;
  259. freed += ret;
  260. } else {
  261. int nr_before;
  262. long ret;
  263. nr_before = do_shrinker_shrink(shrinker, shrinkctl, 0);
  264. ret = do_shrinker_shrink(shrinker, shrinkctl,
  265. batch_size);
  266. if (ret == -1)
  267. break;
  268. if (ret < nr_before)
  269. freed += nr_before - ret;
  270. }
  271. count_vm_events(SLABS_SCANNED, batch_size);
  272. total_scan -= batch_size;
  273. cond_resched();
  274. }
  275. /*
  276. * move the unused scan count back into the shrinker in a
  277. * manner that handles concurrent updates. If we exhausted the
  278. * scan, there is no need to do an update.
  279. */
  280. if (total_scan > 0)
  281. new_nr = atomic_long_add_return(total_scan,
  282. &shrinker->nr_in_batch);
  283. else
  284. new_nr = atomic_long_read(&shrinker->nr_in_batch);
  285. trace_mm_shrink_slab_end(shrinker, freed, nr, new_nr);
  286. }
  287. up_read(&shrinker_rwsem);
  288. out:
  289. cond_resched();
  290. return freed;
  291. }
  292. static inline int is_page_cache_freeable(struct page *page)
  293. {
  294. /*
  295. * A freeable page cache page is referenced only by the caller
  296. * that isolated the page, the page cache radix tree and
  297. * optional buffer heads at page->private.
  298. */
  299. return page_count(page) - page_has_private(page) == 2;
  300. }
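/* May the current reclaim context queue writeback against @bdi, i.e. is it
 * kswapd-like (PF_SWAPWRITE), is the device uncongested, or is it the
 * caller's own backing device? */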
  301. static int may_write_to_queue(struct backing_dev_info *bdi,
  302. struct scan_control *sc)
  303. {
  304. if (current->flags & PF_SWAPWRITE)
  305. return 1;
  306. if (!bdi_write_congested(bdi))
  307. return 1;
  308. if (bdi == current->backing_dev_info)
  309. return 1;
  310. return 0;
  311. }
  312. /*
  313. * We detected a synchronous write error writing a page out. Probably
  314. * -ENOSPC. We need to propagate that into the address_space for a subsequent
  315. * fsync(), msync() or close().
  316. *
  317. * The tricky part is that after writepage we cannot touch the mapping: nothing
  318. * prevents it from being freed up. But we have a ref on the page and once
  319. * that page is locked, the mapping is pinned.
  320. *
  321. * We're allowed to run sleeping lock_page() here because we know the caller has
  322. * __GFP_FS.
  323. */
  324. static void handle_write_error(struct address_space *mapping,
  325. struct page *page, int error)
  326. {
  327. lock_page(page);
  328. if (page_mapping(page) == mapping)
  329. mapping_set_error(mapping, error);
  330. unlock_page(page);
  331. }
  332. /* possible outcome of pageout() */
  333. typedef enum {
  334. /* failed to write page out, page is locked */
  335. PAGE_KEEP,
  336. /* move page to the active list, page is locked */
  337. PAGE_ACTIVATE,
  338. /* page has been sent to the disk successfully, page is unlocked */
  339. PAGE_SUCCESS,
  340. /* page is clean and locked */
  341. PAGE_CLEAN,
  342. } pageout_t;
  343. /*
  344. * pageout is called by shrink_page_list() for each dirty page.
  345. * Calls ->writepage().
  346. */
  347. static pageout_t pageout(struct page *page, struct address_space *mapping,
  348. struct scan_control *sc)
  349. {
  350. /*
  351. * If the page is dirty, only perform writeback if that write
  352. * will be non-blocking, to prevent this allocation from being
  353. * stalled by pagecache activity. But note that there may be
  354. * stalls if we need to run get_block(). We could test
  355. * PagePrivate for that.
  356. *
  357. * If this process is currently in __generic_file_aio_write() against
  358. * this page's queue, we can perform writeback even if that
  359. * will block.
  360. *
  361. * If the page is swapcache, write it back even if that would
  362. * block, for some throttling. This happens by accident, because
  363. * swap_backing_dev_info is bust: it doesn't reflect the
  364. * congestion state of the swapdevs. Easy to fix, if needed.
  365. */
  366. if (!is_page_cache_freeable(page))
  367. return PAGE_KEEP;
  368. if (!mapping) {
  369. /*
  370. * Some data journaling orphaned pages can have
  371. * page->mapping == NULL while being dirty with clean buffers.
  372. */
  373. if (page_has_private(page)) {
  374. if (try_to_free_buffers(page)) {
  375. ClearPageDirty(page);
  376. printk("%s: orphaned page\n", __func__);
  377. return PAGE_CLEAN;
  378. }
  379. }
  380. return PAGE_KEEP;
  381. }
  382. if (mapping->a_ops->writepage == NULL)
  383. return PAGE_ACTIVATE;
  384. if (!may_write_to_queue(mapping->backing_dev_info, sc))
  385. return PAGE_KEEP;
  386. if (clear_page_dirty_for_io(page)) {
  387. int res;
  388. struct writeback_control wbc = {
  389. .sync_mode = WB_SYNC_NONE,
  390. .nr_to_write = SWAP_CLUSTER_MAX,
  391. .range_start = 0,
  392. .range_end = LLONG_MAX,
  393. .for_reclaim = 1,
  394. };
  395. SetPageReclaim(page);
  396. res = mapping->a_ops->writepage(page, &wbc);
  397. if (res < 0)
  398. handle_write_error(mapping, page, res);
  399. if (res == AOP_WRITEPAGE_ACTIVATE) {
  400. ClearPageReclaim(page);
  401. return PAGE_ACTIVATE;
  402. }
  403. if (!PageWriteback(page)) {
  404. /* synchronous write or broken a_ops? */
  405. ClearPageReclaim(page);
  406. }
  407. trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
  408. inc_zone_page_state(page, NR_VMSCAN_WRITE);
  409. return PAGE_SUCCESS;
  410. }
  411. return PAGE_CLEAN;
  412. }
  413. /*
  414. * Same as remove_mapping, but if the page is removed from the mapping, it
  415. * gets returned with a refcount of 0.
  416. */
  417. static int __remove_mapping(struct address_space *mapping, struct page *page)
  418. {
  419. BUG_ON(!PageLocked(page));
  420. BUG_ON(mapping != page_mapping(page));
  421. spin_lock_irq(&mapping->tree_lock);
  422. /*
  423. * The non racy check for a busy page.
  424. *
  425. * Must be careful with the order of the tests. When someone has
  426. * a ref to the page, it may be possible that they dirty it then
  427. * drop the reference. So if PageDirty is tested before page_count
  428. * here, then the following race may occur:
  429. *
  430. * get_user_pages(&page);
  431. * [user mapping goes away]
  432. * write_to(page);
  433. * !PageDirty(page) [good]
  434. * SetPageDirty(page);
  435. * put_page(page);
  436. * !page_count(page) [good, discard it]
  437. *
  438. * [oops, our write_to data is lost]
  439. *
  440. * Reversing the order of the tests ensures such a situation cannot
  441. * escape unnoticed. The smp_rmb is needed to ensure the page->flags
  442. * load is not satisfied before that of page->_count.
  443. *
  444. * Note that if SetPageDirty is always performed via set_page_dirty,
  445. * and thus under tree_lock, then this ordering is not required.
  446. */
  447. if (!page_freeze_refs(page, 2))
  448. goto cannot_free;
  449. /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
  450. if (unlikely(PageDirty(page))) {
  451. page_unfreeze_refs(page, 2);
  452. goto cannot_free;
  453. }
  454. if (PageSwapCache(page)) {
  455. swp_entry_t swap = { .val = page_private(page) };
  456. __delete_from_swap_cache(page);
  457. spin_unlock_irq(&mapping->tree_lock);
  458. swapcache_free(swap, page);
  459. } else {
  460. void (*freepage)(struct page *);
  461. freepage = mapping->a_ops->freepage;
  462. __delete_from_page_cache(page);
  463. spin_unlock_irq(&mapping->tree_lock);
  464. mem_cgroup_uncharge_cache_page(page);
  465. if (freepage != NULL)
  466. freepage(page);
  467. }
  468. return 1;
  469. cannot_free:
  470. spin_unlock_irq(&mapping->tree_lock);
  471. return 0;
  472. }
  473. /*
  474. * Attempt to detach a locked page from its ->mapping. If it is dirty or if
  475. * someone else has a ref on the page, abort and return 0. If it was
  476. * successfully detached, return 1. Assumes the caller has a single ref on
  477. * this page.
  478. */
  479. int remove_mapping(struct address_space *mapping, struct page *page)
  480. {
  481. if (__remove_mapping(mapping, page)) {
  482. /*
  483. * Unfreezing the refcount with 1 rather than 2 effectively
  484. * drops the pagecache ref for us without requiring another
  485. * atomic operation.
  486. */
  487. page_unfreeze_refs(page, 1);
  488. return 1;
  489. }
  490. return 0;
  491. }
  492. /**
  493. * putback_lru_page - put previously isolated page onto appropriate LRU list
  494. * @page: page to be put back to appropriate lru list
  495. *
  496. * Add previously isolated @page to appropriate LRU list.
  497. * Page may still be unevictable for other reasons.
  498. *
  499. * lru_lock must not be held, interrupts must be enabled.
  500. */
  501. void putback_lru_page(struct page *page)
  502. {
  503. int lru;
  504. int was_unevictable = PageUnevictable(page);
  505. VM_BUG_ON(PageLRU(page));
  506. redo:
  507. ClearPageUnevictable(page);
  508. if (page_evictable(page)) {
  509. /*
  510. * For evictable pages, we can use the cache.
  511. * In the event of a race, the worst case is we end up with an
  512. * unevictable page on [in]active list.
  513. * We know how to handle that.
  514. */
  515. lru = page_lru_base_type(page);
  516. lru_cache_add(page);
  517. } else {
  518. /*
  519. * Put unevictable pages directly on zone's unevictable
  520. * list.
  521. */
  522. lru = LRU_UNEVICTABLE;
  523. add_page_to_unevictable_list(page);
  524. /*
  525. * When racing with an mlock or AS_UNEVICTABLE clearing
  526. * (page is unlocked) make sure that if the other thread
  527. * does not observe our setting of PG_lru and fails
  528. * isolation/check_move_unevictable_pages,
  529. * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
  530. * the page back to the evictable list.
  531. *
  532. * The other side is TestClearPageMlocked() or shmem_lock().
  533. */
  534. smp_mb();
  535. }
  536. /*
  537. * The page's status can change while we move it among LRU lists. If an
  538. * evictable page ends up on the unevictable list, it will never be freed.
  539. * To avoid that, check again after adding it to the list.
  540. */
  541. if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
  542. if (!isolate_lru_page(page)) {
  543. put_page(page);
  544. goto redo;
  545. }
  546. /* This means someone else dropped this page from the LRU,
  547. * so it will be freed or put back on the LRU again. There is
  548. * nothing to do here.
  549. */
  550. }
  551. if (was_unevictable && lru != LRU_UNEVICTABLE)
  552. count_vm_event(UNEVICTABLE_PGRESCUED);
  553. else if (!was_unevictable && lru == LRU_UNEVICTABLE)
  554. count_vm_event(UNEVICTABLE_PGCULLED);
  555. put_page(page); /* drop ref from isolate */
  556. }
  557. enum page_references {
  558. PAGEREF_RECLAIM,
  559. PAGEREF_RECLAIM_CLEAN,
  560. PAGEREF_KEEP,
  561. PAGEREF_ACTIVATE,
  562. };
  563. static enum page_references page_check_references(struct page *page,
  564. struct scan_control *sc)
  565. {
  566. int referenced_ptes, referenced_page;
  567. unsigned long vm_flags;
  568. referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
  569. &vm_flags);
  570. referenced_page = TestClearPageReferenced(page);
  571. /*
  572. * Mlock lost the isolation race with us. Let try_to_unmap()
  573. * move the page to the unevictable list.
  574. */
  575. if (vm_flags & VM_LOCKED)
  576. return PAGEREF_RECLAIM;
  577. if (referenced_ptes) {
  578. if (PageSwapBacked(page))
  579. return PAGEREF_ACTIVATE;
  580. /*
  581. * All mapped pages start out with page table
  582. * references from the instantiating fault, so we need
  583. * to look twice if a mapped file page is used more
  584. * than once.
  585. *
  586. * Mark it and spare it for another trip around the
  587. * inactive list. Another page table reference will
  588. * lead to its activation.
  589. *
  590. * Note: the mark is set for activated pages as well
  591. * so that recently deactivated but used pages are
  592. * quickly recovered.
  593. */
  594. SetPageReferenced(page);
  595. if (referenced_page || referenced_ptes > 1)
  596. return PAGEREF_ACTIVATE;
  597. /*
  598. * Activate file-backed executable pages after first usage.
  599. */
  600. if (vm_flags & VM_EXEC)
  601. return PAGEREF_ACTIVATE;
  602. return PAGEREF_KEEP;
  603. }
  604. /* Reclaim if clean, defer dirty pages to writeback */
  605. if (referenced_page && !PageSwapBacked(page))
  606. return PAGEREF_RECLAIM_CLEAN;
  607. return PAGEREF_RECLAIM;
  608. }
  609. /* Check if a page is dirty or under writeback */
  610. static void page_check_dirty_writeback(struct page *page,
  611. bool *dirty, bool *writeback)
  612. {
  613. struct address_space *mapping;
  614. /*
  615. * Anonymous pages are not handled by flushers and must be written
  616. * from reclaim context. Do not stall reclaim based on them.
  617. */
  618. if (!page_is_file_cache(page)) {
  619. *dirty = false;
  620. *writeback = false;
  621. return;
  622. }
  623. /* By default assume that the page flags are accurate */
  624. *dirty = PageDirty(page);
  625. *writeback = PageWriteback(page);
  626. /* Verify dirty/writeback state if the filesystem supports it */
  627. if (!page_has_private(page))
  628. return;
  629. mapping = page_mapping(page);
  630. if (mapping && mapping->a_ops->is_dirty_writeback)
  631. mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
  632. }
  633. /*
  634. * shrink_page_list() returns the number of reclaimed pages
  635. */
  636. static unsigned long shrink_page_list(struct list_head *page_list,
  637. struct zone *zone,
  638. struct scan_control *sc,
  639. enum ttu_flags ttu_flags,
  640. unsigned long *ret_nr_dirty,
  641. unsigned long *ret_nr_unqueued_dirty,
  642. unsigned long *ret_nr_congested,
  643. unsigned long *ret_nr_writeback,
  644. unsigned long *ret_nr_immediate,
  645. bool force_reclaim)
  646. {
  647. LIST_HEAD(ret_pages);
  648. LIST_HEAD(free_pages);
  649. int pgactivate = 0;
  650. unsigned long nr_unqueued_dirty = 0;
  651. unsigned long nr_dirty = 0;
  652. unsigned long nr_congested = 0;
  653. unsigned long nr_reclaimed = 0;
  654. unsigned long nr_writeback = 0;
  655. unsigned long nr_immediate = 0;
  656. cond_resched();
  657. mem_cgroup_uncharge_start();
  658. while (!list_empty(page_list)) {
  659. struct address_space *mapping;
  660. struct page *page;
  661. int may_enter_fs;
  662. enum page_references references = PAGEREF_RECLAIM_CLEAN;
  663. bool dirty, writeback;
  664. cond_resched();
  665. page = lru_to_page(page_list);
  666. list_del(&page->lru);
  667. if (!trylock_page(page))
  668. goto keep;
  669. VM_BUG_ON(PageActive(page));
  670. VM_BUG_ON(page_zone(page) != zone);
  671. sc->nr_scanned++;
  672. if (unlikely(!page_evictable(page)))
  673. goto cull_mlocked;
  674. if (!sc->may_unmap && page_mapped(page))
  675. goto keep_locked;
  676. /* Double the slab pressure for mapped and swapcache pages */
  677. if (page_mapped(page) || PageSwapCache(page))
  678. sc->nr_scanned++;
  679. may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
  680. (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
  681. /*
  682. * The number of dirty pages determines if a zone is marked
  683. * reclaim_congested which affects wait_iff_congested. kswapd
  684. * will stall and start writing pages if the tail of the LRU
  685. * is all dirty unqueued pages.
  686. */
  687. page_check_dirty_writeback(page, &dirty, &writeback);
  688. if (dirty || writeback)
  689. nr_dirty++;
  690. if (dirty && !writeback)
  691. nr_unqueued_dirty++;
  692. /*
  693. * Treat this page as congested if the underlying BDI is or if
  694. * pages are cycling through the LRU so quickly that the
  695. * pages marked for immediate reclaim are making it to the
  696. * end of the LRU a second time.
  697. */
  698. mapping = page_mapping(page);
  699. if ((mapping && bdi_write_congested(mapping->backing_dev_info)) ||
  700. (writeback && PageReclaim(page)))
  701. nr_congested++;
  702. /*
  703. * If a page at the tail of the LRU is under writeback, there
  704. * are three cases to consider.
  705. *
  706. * 1) If reclaim is encountering an excessive number of pages
  707. * under writeback and this page is both under writeback and
  708. * PageReclaim then it indicates that pages are being queued
  709. * for IO but are being recycled through the LRU before the
  710. * IO can complete. Waiting on the page itself risks an
  711. * indefinite stall if it is impossible to writeback the
  712. * page due to IO error or disconnected storage so instead
  713. * note that the LRU is being scanned too quickly and the
  714. * caller can stall after page list has been processed.
  715. *
  716. * 2) Global reclaim encounters a page, memcg encounters a
  717. * page that is not marked for immediate reclaim or
  718. * the caller does not have __GFP_IO. In this case mark
  719. * the page for immediate reclaim and continue scanning.
  720. *
  721. * __GFP_IO is checked because a loop driver thread might
  722. * enter reclaim, and deadlock if it waits on a page for
  723. * which it is needed to do the write (loop masks off
  724. * __GFP_IO|__GFP_FS for this reason); but more thought
  725. * would probably show more reasons.
  726. *
  727. * Don't require __GFP_FS, since we're not going into the
  728. * FS, just waiting on its writeback completion. Worryingly,
  729. * ext4 gfs2 and xfs allocate pages with
  730. * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
  731. * may_enter_fs here is liable to OOM on them.
  732. *
  733. * 3) memcg encounters a page that is not already marked
  734. * PageReclaim. memcg does not have any dirty pages
  735. * throttling so we could easily OOM just because too many
  736. * pages are in writeback and there is nothing else to
  737. * reclaim. Wait for the writeback to complete.
  738. */
  739. if (PageWriteback(page)) {
  740. /* Case 1 above */
  741. if (current_is_kswapd() &&
  742. PageReclaim(page) &&
  743. zone_is_reclaim_writeback(zone)) {
  744. nr_immediate++;
  745. goto keep_locked;
  746. /* Case 2 above */
  747. } else if (global_reclaim(sc) ||
  748. !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
  749. /*
  750. * This is slightly racy - end_page_writeback()
  751. * might have just cleared PageReclaim, then
  752. * setting PageReclaim here ends up being interpreted
  753. * as PageReadahead - but that does not matter
  754. * enough to care. What we do want is for this
  755. * page to have PageReclaim set next time memcg
  756. * reclaim reaches the tests above, so it will
  757. * then wait_on_page_writeback() to avoid OOM;
  758. * and it's also appropriate in global reclaim.
  759. */
  760. SetPageReclaim(page);
  761. nr_writeback++;
  762. goto keep_locked;
  763. /* Case 3 above */
  764. } else {
  765. wait_on_page_writeback(page);
  766. }
  767. }
  768. if (!force_reclaim)
  769. references = page_check_references(page, sc);
  770. switch (references) {
  771. case PAGEREF_ACTIVATE:
  772. goto activate_locked;
  773. case PAGEREF_KEEP:
  774. goto keep_locked;
  775. case PAGEREF_RECLAIM:
  776. case PAGEREF_RECLAIM_CLEAN:
  777. ; /* try to reclaim the page below */
  778. }
  779. /*
  780. * Anonymous process memory has backing store?
  781. * Try to allocate it some swap space here.
  782. */
  783. if (PageAnon(page) && !PageSwapCache(page)) {
  784. if (!(sc->gfp_mask & __GFP_IO))
  785. goto keep_locked;
  786. if (!add_to_swap(page, page_list))
  787. goto activate_locked;
  788. may_enter_fs = 1;
  789. /* Adding to swap updated mapping */
  790. mapping = page_mapping(page);
  791. }
  792. /*
  793. * The page is mapped into the page tables of one or more
  794. * processes. Try to unmap it here.
  795. */
  796. if (page_mapped(page) && mapping) {
  797. switch (try_to_unmap(page, ttu_flags)) {
  798. case SWAP_FAIL:
  799. goto activate_locked;
  800. case SWAP_AGAIN:
  801. goto keep_locked;
  802. case SWAP_MLOCK:
  803. goto cull_mlocked;
  804. case SWAP_SUCCESS:
  805. ; /* try to free the page below */
  806. }
  807. }
  808. if (PageDirty(page)) {
  809. /*
  810. * Only kswapd can write back filesystem pages, to
  811. * avoid the risk of stack overflow, and even then only
  812. * if many dirty pages have been encountered.
  813. */
  814. if (page_is_file_cache(page) &&
  815. (!current_is_kswapd() ||
  816. !zone_is_reclaim_dirty(zone))) {
  817. /*
  818. * Immediately reclaim when written back.
  819. * Similar in principle to deactivate_page()
  820. * except we already have the page isolated
  821. * and know it's dirty
  822. */
  823. inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
  824. SetPageReclaim(page);
  825. goto keep_locked;
  826. }
  827. if (references == PAGEREF_RECLAIM_CLEAN)
  828. goto keep_locked;
  829. if (!may_enter_fs)
  830. goto keep_locked;
  831. if (!sc->may_writepage)
  832. goto keep_locked;
  833. /* Page is dirty, try to write it out here */
  834. switch (pageout(page, mapping, sc)) {
  835. case PAGE_KEEP:
  836. goto keep_locked;
  837. case PAGE_ACTIVATE:
  838. goto activate_locked;
  839. case PAGE_SUCCESS:
  840. if (PageWriteback(page))
  841. goto keep;
  842. if (PageDirty(page))
  843. goto keep;
  844. /*
  845. * A synchronous write - probably a ramdisk. Go
  846. * ahead and try to reclaim the page.
  847. */
  848. if (!trylock_page(page))
  849. goto keep;
  850. if (PageDirty(page) || PageWriteback(page))
  851. goto keep_locked;
  852. mapping = page_mapping(page);
  853. case PAGE_CLEAN:
  854. ; /* try to free the page below */
  855. }
  856. }
  857. /*
  858. * If the page has buffers, try to free the buffer mappings
  859. * associated with this page. If we succeed we try to free
  860. * the page as well.
  861. *
  862. * We do this even if the page is PageDirty().
  863. * try_to_release_page() does not perform I/O, but it is
  864. * possible for a page to have PageDirty set, but it is actually
  865. * clean (all its buffers are clean). This happens if the
  866. * buffers were written out directly, with submit_bh(). ext3
  867. * will do this, as well as the blockdev mapping.
  868. * try_to_release_page() will discover that cleanness and will
  869. * drop the buffers and mark the page clean - it can be freed.
  870. *
  871. * Rarely, pages can have buffers and no ->mapping. These are
  872. * the pages which were not successfully invalidated in
  873. * truncate_complete_page(). We try to drop those buffers here
  874. * and if that worked, and the page is no longer mapped into
  875. * process address space (page_count == 1) it can be freed.
  876. * Otherwise, leave the page on the LRU so it is swappable.
  877. */
  878. if (page_has_private(page)) {
  879. if (!try_to_release_page(page, sc->gfp_mask))
  880. goto activate_locked;
  881. if (!mapping && page_count(page) == 1) {
  882. unlock_page(page);
  883. if (put_page_testzero(page))
  884. goto free_it;
  885. else {
  886. /*
  887. * rare race with speculative reference.
  888. * the speculative reference will free
  889. * this page shortly, so we may
  890. * increment nr_reclaimed here (and
  891. * leave it off the LRU).
  892. */
  893. nr_reclaimed++;
  894. continue;
  895. }
  896. }
  897. }
  898. if (!mapping || !__remove_mapping(mapping, page))
  899. goto keep_locked;
  900. /*
  901. * At this point, we have no other references and there is
  902. * no way to pick any more up (removed from LRU, removed
  903. * from pagecache). Can use non-atomic bitops now (and
  904. * we obviously don't have to worry about waking up a process
  905. * waiting on the page lock, because there are no references.
  906. */
  907. __clear_page_locked(page);
  908. free_it:
  909. nr_reclaimed++;
  910. /*
  911. * Is there a need to periodically free the page list? It would
  912. * appear not, as the counts should be low.
  913. */
  914. list_add(&page->lru, &free_pages);
  915. continue;
  916. cull_mlocked:
  917. if (PageSwapCache(page))
  918. try_to_free_swap(page);
  919. unlock_page(page);
  920. putback_lru_page(page);
  921. continue;
  922. activate_locked:
  923. /* Not a candidate for swapping, so reclaim swap space. */
  924. if (PageSwapCache(page) && vm_swap_full())
  925. try_to_free_swap(page);
  926. VM_BUG_ON(PageActive(page));
  927. SetPageActive(page);
  928. pgactivate++;
  929. keep_locked:
  930. unlock_page(page);
  931. keep:
  932. list_add(&page->lru, &ret_pages);
  933. VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
  934. }
  935. free_hot_cold_page_list(&free_pages, 1);
  936. list_splice(&ret_pages, page_list);
  937. count_vm_events(PGACTIVATE, pgactivate);
  938. mem_cgroup_uncharge_end();
  939. *ret_nr_dirty += nr_dirty;
  940. *ret_nr_congested += nr_congested;
  941. *ret_nr_unqueued_dirty += nr_unqueued_dirty;
  942. *ret_nr_writeback += nr_writeback;
  943. *ret_nr_immediate += nr_immediate;
  944. return nr_reclaimed;
  945. }
  946. unsigned long reclaim_clean_pages_from_list(struct zone *zone,
  947. struct list_head *page_list)
  948. {
  949. struct scan_control sc = {
  950. .gfp_mask = GFP_KERNEL,
  951. .priority = DEF_PRIORITY,
  952. .may_unmap = 1,
  953. };
  954. unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
  955. struct page *page, *next;
  956. LIST_HEAD(clean_pages);
  957. list_for_each_entry_safe(page, next, page_list, lru) {
  958. if (page_is_file_cache(page) && !PageDirty(page)) {
  959. ClearPageActive(page);
  960. list_move(&page->lru, &clean_pages);
  961. }
  962. }
  963. ret = shrink_page_list(&clean_pages, zone, &sc,
  964. TTU_UNMAP|TTU_IGNORE_ACCESS,
  965. &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
  966. list_splice(&clean_pages, page_list);
  967. __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
  968. return ret;
  969. }
  970. /*
  971. * Attempt to remove the specified page from its LRU. Only take this page
  972. * if it is of the appropriate PageActive status. Pages which are being
  973. * freed elsewhere are also ignored.
  974. *
  975. * page: page to consider
  976. * mode: one of the LRU isolation modes defined above
  977. *
  978. * returns 0 on success, -ve errno on failure.
  979. */
  980. int __isolate_lru_page(struct page *page, isolate_mode_t mode)
  981. {
  982. int ret = -EINVAL;
  983. /* Only take pages on the LRU. */
  984. if (!PageLRU(page))
  985. return ret;
  986. /* Compaction should not handle unevictable pages but CMA can do so */
  987. if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
  988. return ret;
  989. ret = -EBUSY;
  990. /*
  991. * To minimise LRU disruption, the caller can indicate that it only
  992. * wants to isolate pages it will be able to operate on without
  993. * blocking - clean pages for the most part.
  994. *
  995. * ISOLATE_CLEAN means that only clean pages should be isolated. This
  996. * is used by reclaim when it cannot write to backing storage.
  997. *
  998. * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
  999. * that can be migrated without blocking.
  1000. */
  1001. if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
  1002. /* All the caller can do on PageWriteback is block */
  1003. if (PageWriteback(page))
  1004. return ret;
  1005. if (PageDirty(page)) {
  1006. struct address_space *mapping;
  1007. /* ISOLATE_CLEAN means only clean pages */
  1008. if (mode & ISOLATE_CLEAN)
  1009. return ret;
  1010. /*
  1011. * Only pages without mappings or that have a
  1012. * ->migratepage callback are possible to migrate
  1013. * without blocking
  1014. */
  1015. mapping = page_mapping(page);
  1016. if (mapping && !mapping->a_ops->migratepage)
  1017. return ret;
  1018. }
  1019. }
  1020. if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
  1021. return ret;
  1022. if (likely(get_page_unless_zero(page))) {
  1023. /*
  1024. * Be careful not to clear PageLRU until after we're
  1025. * sure the page is not being freed elsewhere -- the
  1026. * page release code relies on it.
  1027. */
  1028. ClearPageLRU(page);
  1029. ret = 0;
  1030. }
  1031. return ret;
  1032. }
  1033. /*
  1034. * zone->lru_lock is heavily contended. Some of the functions that
  1035. * shrink the lists perform better by taking out a batch of pages
  1036. * and working on them outside the LRU lock.
  1037. *
  1038. * For pagecache intensive workloads, this function is the hottest
  1039. * spot in the kernel (apart from copy_*_user functions).
  1040. *
  1041. * Appropriate locks must be held before calling this function.
  1042. *
  1043. * @nr_to_scan: The number of pages to look through on the list.
  1044. * @lruvec: The LRU vector to pull pages from.
  1045. * @dst: The temp list to put pages on to.
  1046. * @nr_scanned: The number of pages that were scanned.
  1047. * @sc: The scan_control struct for this reclaim session
  1048. * @mode: One of the LRU isolation modes
  1049. * @lru: LRU list id for isolating
  1050. *
  1051. * returns how many pages were moved onto *@dst.
  1052. */
  1053. static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
  1054. struct lruvec *lruvec, struct list_head *dst,
  1055. unsigned long *nr_scanned, struct scan_control *sc,
  1056. isolate_mode_t mode, enum lru_list lru)
  1057. {
  1058. struct list_head *src = &lruvec->lists[lru];
  1059. unsigned long nr_taken = 0;
  1060. unsigned long scan;
  1061. for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
  1062. struct page *page;
  1063. int nr_pages;
  1064. page = lru_to_page(src);
  1065. prefetchw_prev_lru_page(page, src, flags);
  1066. VM_BUG_ON(!PageLRU(page));
  1067. switch (__isolate_lru_page(page, mode)) {
  1068. case 0:
  1069. nr_pages = hpage_nr_pages(page);
  1070. mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
  1071. list_move(&page->lru, dst);
  1072. nr_taken += nr_pages;
  1073. break;
  1074. case -EBUSY:
  1075. /* else it is being freed elsewhere */
  1076. list_move(&page->lru, src);
  1077. continue;
  1078. default:
  1079. BUG();
  1080. }
  1081. }
  1082. *nr_scanned = scan;
  1083. trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
  1084. nr_taken, mode, is_file_lru(lru));
  1085. return nr_taken;
  1086. }
  1087. /**
  1088. * isolate_lru_page - tries to isolate a page from its LRU list
  1089. * @page: page to isolate from its LRU list
  1090. *
  1091. * Isolates a @page from an LRU list, clears PageLRU and adjusts the
  1092. * vmstat statistic corresponding to whatever LRU list the page was on.
  1093. *
  1094. * Returns 0 if the page was removed from an LRU list.
  1095. * Returns -EBUSY if the page was not on an LRU list.
  1096. *
  1097. * The returned page will have PageLRU() cleared. If it was found on
  1098. * the active list, it will have PageActive set. If it was found on
  1099. * the unevictable list, it will have the PageUnevictable bit set. That flag
  1100. * may need to be cleared by the caller before letting the page go.
  1101. *
  1102. * The vmstat statistic corresponding to the list on which the page was
  1103. * found will be decremented.
  1104. *
  1105. * Restrictions:
  1106. * (1) Must be called with an elevated refcount on the page. This is a
  1107. * fundamental difference from isolate_lru_pages (which is called
  1108. * without a stable reference).
  1109. * (2) the lru_lock must not be held.
  1110. * (3) interrupts must be enabled.
  1111. */
  1112. int isolate_lru_page(struct page *page)
  1113. {
  1114. int ret = -EBUSY;
  1115. VM_BUG_ON(!page_count(page));
  1116. if (PageLRU(page)) {
  1117. struct zone *zone = page_zone(page);
  1118. struct lruvec *lruvec;
  1119. spin_lock_irq(&zone->lru_lock);
  1120. lruvec = mem_cgroup_page_lruvec(page, zone);
  1121. if (PageLRU(page)) {
  1122. int lru = page_lru(page);
  1123. get_page(page);
  1124. ClearPageLRU(page);
  1125. del_page_from_lru_list(page, lruvec, lru);
  1126. ret = 0;
  1127. }
  1128. spin_unlock_irq(&zone->lru_lock);
  1129. }
  1130. return ret;
  1131. }
  1132. /*
  1133. * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
  1134. * then get rescheduled. When a massive number of tasks are doing page
  1135. * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
  1136. * the LRU lists will shrink and be scanned faster than necessary, leading to
  1137. * unnecessary swapping, thrashing and OOM.
  1138. */
  1139. static int too_many_isolated(struct zone *zone, int file,
  1140. struct scan_control *sc)
  1141. {
  1142. unsigned long inactive, isolated;
  1143. if (current_is_kswapd())
  1144. return 0;
  1145. if (!global_reclaim(sc))
  1146. return 0;
  1147. if (file) {
  1148. inactive = zone_page_state(zone, NR_INACTIVE_FILE);
  1149. isolated = zone_page_state(zone, NR_ISOLATED_FILE);
  1150. } else {
  1151. inactive = zone_page_state(zone, NR_INACTIVE_ANON);
  1152. isolated = zone_page_state(zone, NR_ISOLATED_ANON);
  1153. }
  1154. /*
  1155. * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
  1156. * won't get blocked by normal direct-reclaimers, forming a circular
  1157. * deadlock.
  1158. */
  1159. if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS)
  1160. inactive >>= 3;
  1161. return isolated > inactive;
  1162. }
  1163. static noinline_for_stack void
  1164. putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
  1165. {
  1166. struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
  1167. struct zone *zone = lruvec_zone(lruvec);
  1168. LIST_HEAD(pages_to_free);
  1169. /*
  1170. * Put back any unfreeable pages.
  1171. */
  1172. while (!list_empty(page_list)) {
  1173. struct page *page = lru_to_page(page_list);
  1174. int lru;
  1175. VM_BUG_ON(PageLRU(page));
  1176. list_del(&page->lru);
  1177. if (unlikely(!page_evictable(page))) {
  1178. spin_unlock_irq(&zone->lru_lock);
  1179. putback_lru_page(page);
  1180. spin_lock_irq(&zone->lru_lock);
  1181. continue;
  1182. }
  1183. lruvec = mem_cgroup_page_lruvec(page, zone);
  1184. SetPageLRU(page);
  1185. lru = page_lru(page);
  1186. add_page_to_lru_list(page, lruvec, lru);
  1187. if (is_active_lru(lru)) {
  1188. int file = is_file_lru(lru);
  1189. int numpages = hpage_nr_pages(page);
  1190. reclaim_stat->recent_rotated[file] += numpages;
  1191. }
  1192. if (put_page_testzero(page)) {
  1193. __ClearPageLRU(page);
  1194. __ClearPageActive(page);
  1195. del_page_from_lru_list(page, lruvec, lru);
  1196. if (unlikely(PageCompound(page))) {
  1197. spin_unlock_irq(&zone->lru_lock);
  1198. (*get_compound_page_dtor(page))(page);
  1199. spin_lock_irq(&zone->lru_lock);
  1200. } else
  1201. list_add(&page->lru, &pages_to_free);
  1202. }
  1203. }
  1204. /*
1205. * To save our caller's stack, now use the input list for pages to free.
  1206. */
  1207. list_splice(&pages_to_free, page_list);
  1208. }
  1209. /*
  1210. * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
  1211. * of reclaimed pages
  1212. */
  1213. static noinline_for_stack unsigned long
  1214. shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
  1215. struct scan_control *sc, enum lru_list lru)
  1216. {
  1217. LIST_HEAD(page_list);
  1218. unsigned long nr_scanned;
  1219. unsigned long nr_reclaimed = 0;
  1220. unsigned long nr_taken;
  1221. unsigned long nr_dirty = 0;
  1222. unsigned long nr_congested = 0;
  1223. unsigned long nr_unqueued_dirty = 0;
  1224. unsigned long nr_writeback = 0;
  1225. unsigned long nr_immediate = 0;
  1226. isolate_mode_t isolate_mode = 0;
  1227. int file = is_file_lru(lru);
  1228. struct zone *zone = lruvec_zone(lruvec);
  1229. struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
  1230. while (unlikely(too_many_isolated(zone, file, sc))) {
  1231. congestion_wait(BLK_RW_ASYNC, HZ/10);
  1232. /* We are about to die and free our memory. Return now. */
  1233. if (fatal_signal_pending(current))
  1234. return SWAP_CLUSTER_MAX;
  1235. }
  1236. lru_add_drain();
  1237. if (!sc->may_unmap)
  1238. isolate_mode |= ISOLATE_UNMAPPED;
  1239. if (!sc->may_writepage)
  1240. isolate_mode |= ISOLATE_CLEAN;
  1241. spin_lock_irq(&zone->lru_lock);
  1242. nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
  1243. &nr_scanned, sc, isolate_mode, lru);
  1244. __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
  1245. __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
  1246. if (global_reclaim(sc)) {
  1247. zone->pages_scanned += nr_scanned;
  1248. if (current_is_kswapd())
  1249. __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
  1250. else
  1251. __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
  1252. }
  1253. spin_unlock_irq(&zone->lru_lock);
  1254. if (nr_taken == 0)
  1255. return 0;
  1256. nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
  1257. &nr_dirty, &nr_unqueued_dirty, &nr_congested,
  1258. &nr_writeback, &nr_immediate,
  1259. false);
  1260. spin_lock_irq(&zone->lru_lock);
  1261. reclaim_stat->recent_scanned[file] += nr_taken;
  1262. if (global_reclaim(sc)) {
  1263. if (current_is_kswapd())
  1264. __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
  1265. nr_reclaimed);
  1266. else
  1267. __count_zone_vm_events(PGSTEAL_DIRECT, zone,
  1268. nr_reclaimed);
  1269. }
  1270. putback_inactive_pages(lruvec, &page_list);
  1271. __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
  1272. spin_unlock_irq(&zone->lru_lock);
  1273. free_hot_cold_page_list(&page_list, 1);
  1274. /*
  1275. * If reclaim is isolating dirty pages under writeback, it implies
  1276. * that the long-lived page allocation rate is exceeding the page
  1277. * laundering rate. Either the global limits are not being effective
  1278. * at throttling processes due to the page distribution throughout
  1279. * zones or there is heavy usage of a slow backing device. The
  1280. * only option is to throttle from reclaim context which is not ideal
  1281. * as there is no guarantee the dirtying process is throttled in the
  1282. * same way balance_dirty_pages() manages.
  1283. *
  1284. * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
1285. * of pages under writeback and marked for immediate reclaim and stall if any
  1286. * are encountered in the nr_immediate check below.
  1287. */
  1288. if (nr_writeback && nr_writeback == nr_taken)
  1289. zone_set_flag(zone, ZONE_WRITEBACK);
  1290. /*
  1291. * memcg will stall in page writeback so only consider forcibly
  1292. * stalling for global reclaim
  1293. */
  1294. if (global_reclaim(sc)) {
  1295. /*
  1296. * Tag a zone as congested if all the dirty pages scanned were
  1297. * backed by a congested BDI and wait_iff_congested will stall.
  1298. */
  1299. if (nr_dirty && nr_dirty == nr_congested)
  1300. zone_set_flag(zone, ZONE_CONGESTED);
  1301. /*
  1302. * If dirty pages are scanned that are not queued for IO, it
  1303. * implies that flushers are not keeping up. In this case, flag
  1304. * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
  1305. * pages from reclaim context. It will forcibly stall in the
  1306. * next check.
  1307. */
  1308. if (nr_unqueued_dirty == nr_taken)
  1309. zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
  1310. /*
1311. * In addition, if kswapd scans pages marked for
  1312. * immediate reclaim and under writeback (nr_immediate), it
  1313. * implies that pages are cycling through the LRU faster than
  1314. * they are written so also forcibly stall.
  1315. */
  1316. if (nr_unqueued_dirty == nr_taken || nr_immediate)
  1317. congestion_wait(BLK_RW_ASYNC, HZ/10);
  1318. }
  1319. /*
  1320. * Stall direct reclaim for IO completions if underlying BDIs or zone
  1321. * is congested. Allow kswapd to continue until it starts encountering
  1322. * unqueued dirty pages or cycling through the LRU too quickly.
  1323. */
  1324. if (!sc->hibernation_mode && !current_is_kswapd())
  1325. wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
  1326. trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
  1327. zone_idx(zone),
  1328. nr_scanned, nr_reclaimed,
  1329. sc->priority,
  1330. trace_shrink_flags(file));
  1331. return nr_reclaimed;
  1332. }
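/*
 * Illustrative sketch (not part of vmscan.c): the forcible-stall heuristic
 * applied above for global reclaim, condensed into one predicate. The helper
 * name is hypothetical; the counters correspond to the values returned by
 * shrink_page_list().
 */
static int example_should_stall_for_writeback(unsigned long nr_taken,
					      unsigned long nr_unqueued_dirty,
					      unsigned long nr_immediate)
{
	/*
	 * Stall when every page taken was dirty but not yet queued for IO
	 * (the flushers are not keeping up), or when any page was still
	 * under writeback despite already being marked for immediate
	 * reclaim (pages cycle through the LRU faster than they are
	 * written back).
	 */
	return nr_unqueued_dirty == nr_taken || nr_immediate != 0;
}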
  1333. /*
  1334. * This moves pages from the active list to the inactive list.
  1335. *
  1336. * We move them the other way if the page is referenced by one or more
  1337. * processes, from rmap.
  1338. *
  1339. * If the pages are mostly unmapped, the processing is fast and it is
  1340. * appropriate to hold zone->lru_lock across the whole operation. But if
  1341. * the pages are mapped, the processing is slow (page_referenced()) so we
  1342. * should drop zone->lru_lock around each page. It's impossible to balance
  1343. * this, so instead we remove the pages from the LRU while processing them.
  1344. * It is safe to rely on PG_active against the non-LRU pages in here because
  1345. * nobody will play with that bit on a non-LRU page.
  1346. *
  1347. * The downside is that we have to touch page->_count against each page.
  1348. * But we had to alter page->flags anyway.
  1349. */
  1350. static void move_active_pages_to_lru(struct lruvec *lruvec,
  1351. struct list_head *list,
  1352. struct list_head *pages_to_free,
  1353. enum lru_list lru)
  1354. {
  1355. struct zone *zone = lruvec_zone(lruvec);
  1356. unsigned long pgmoved = 0;
  1357. struct page *page;
  1358. int nr_pages;
  1359. while (!list_empty(list)) {
  1360. page = lru_to_page(list);
  1361. lruvec = mem_cgroup_page_lruvec(page, zone);
  1362. VM_BUG_ON(PageLRU(page));
  1363. SetPageLRU(page);
  1364. nr_pages = hpage_nr_pages(page);
  1365. mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
  1366. list_move(&page->lru, &lruvec->lists[lru]);
  1367. pgmoved += nr_pages;
  1368. if (put_page_testzero(page)) {
  1369. __ClearPageLRU(page);
  1370. __ClearPageActive(page);
  1371. del_page_from_lru_list(page, lruvec, lru);
  1372. if (unlikely(PageCompound(page))) {
  1373. spin_unlock_irq(&zone->lru_lock);
  1374. (*get_compound_page_dtor(page))(page);
  1375. spin_lock_irq(&zone->lru_lock);
  1376. } else
  1377. list_add(&page->lru, pages_to_free);
  1378. }
  1379. }
  1380. __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
  1381. if (!is_active_lru(lru))
  1382. __count_vm_events(PGDEACTIVATE, pgmoved);
  1383. }
  1384. static void shrink_active_list(unsigned long nr_to_scan,
  1385. struct lruvec *lruvec,
  1386. struct scan_control *sc,
  1387. enum lru_list lru)
  1388. {
  1389. unsigned long nr_taken;
  1390. unsigned long nr_scanned;
  1391. unsigned long vm_flags;
  1392. LIST_HEAD(l_hold); /* The pages which were snipped off */
  1393. LIST_HEAD(l_active);
  1394. LIST_HEAD(l_inactive);
  1395. struct page *page;
  1396. struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
  1397. unsigned long nr_rotated = 0;
  1398. isolate_mode_t isolate_mode = 0;
  1399. int file = is_file_lru(lru);
  1400. struct zone *zone = lruvec_zone(lruvec);
  1401. lru_add_drain();
  1402. if (!sc->may_unmap)
  1403. isolate_mode |= ISOLATE_UNMAPPED;
  1404. if (!sc->may_writepage)
  1405. isolate_mode |= ISOLATE_CLEAN;
  1406. spin_lock_irq(&zone->lru_lock);
  1407. nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
  1408. &nr_scanned, sc, isolate_mode, lru);
  1409. if (global_reclaim(sc))
  1410. zone->pages_scanned += nr_scanned;
  1411. reclaim_stat->recent_scanned[file] += nr_taken;
  1412. __count_zone_vm_events(PGREFILL, zone, nr_scanned);
  1413. __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
  1414. __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
  1415. spin_unlock_irq(&zone->lru_lock);
  1416. while (!list_empty(&l_hold)) {
  1417. cond_resched();
  1418. page = lru_to_page(&l_hold);
  1419. list_del(&page->lru);
  1420. if (unlikely(!page_evictable(page))) {
  1421. putback_lru_page(page);
  1422. continue;
  1423. }
  1424. if (unlikely(buffer_heads_over_limit)) {
  1425. if (page_has_private(page) && trylock_page(page)) {
  1426. if (page_has_private(page))
  1427. try_to_release_page(page, 0);
  1428. unlock_page(page);
  1429. }
  1430. }
  1431. if (page_referenced(page, 0, sc->target_mem_cgroup,
  1432. &vm_flags)) {
  1433. nr_rotated += hpage_nr_pages(page);
  1434. /*
  1435. * Identify referenced, file-backed active pages and
1436. * give them one more trip around the active list, so
1437. * that executable code gets a better chance to stay in
1438. * memory under moderate memory pressure. Anon pages
  1439. * are not likely to be evicted by use-once streaming
  1440. * IO, plus JVM can create lots of anon VM_EXEC pages,
  1441. * so we ignore them here.
  1442. */
  1443. if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
  1444. list_add(&page->lru, &l_active);
  1445. continue;
  1446. }
  1447. }
  1448. ClearPageActive(page); /* we are de-activating */
  1449. list_add(&page->lru, &l_inactive);
  1450. }
  1451. /*
  1452. * Move pages back to the lru list.
  1453. */
  1454. spin_lock_irq(&zone->lru_lock);
  1455. /*
  1456. * Count referenced pages from currently used mappings as rotated,
  1457. * even though only some of them are actually re-activated. This
  1458. * helps balance scan pressure between file and anonymous pages in
1459. * get_scan_count().
  1460. */
  1461. reclaim_stat->recent_rotated[file] += nr_rotated;
  1462. move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
  1463. move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
  1464. __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
  1465. spin_unlock_irq(&zone->lru_lock);
  1466. free_hot_cold_page_list(&l_hold, 1);
  1467. }
  1468. #ifdef CONFIG_SWAP
  1469. static int inactive_anon_is_low_global(struct zone *zone)
  1470. {
  1471. unsigned long active, inactive;
  1472. active = zone_page_state(zone, NR_ACTIVE_ANON);
  1473. inactive = zone_page_state(zone, NR_INACTIVE_ANON);
  1474. if (inactive * zone->inactive_ratio < active)
  1475. return 1;
  1476. return 0;
  1477. }
  1478. /**
  1479. * inactive_anon_is_low - check if anonymous pages need to be deactivated
  1480. * @lruvec: LRU vector to check
  1481. *
  1482. * Returns true if the zone does not have enough inactive anon pages,
  1483. * meaning some active anon pages need to be deactivated.
  1484. */
  1485. static int inactive_anon_is_low(struct lruvec *lruvec)
  1486. {
  1487. /*
  1488. * If we don't have swap space, anonymous page deactivation
  1489. * is pointless.
  1490. */
  1491. if (!total_swap_pages)
  1492. return 0;
  1493. if (!mem_cgroup_disabled())
  1494. return mem_cgroup_inactive_anon_is_low(lruvec);
  1495. return inactive_anon_is_low_global(lruvec_zone(lruvec));
  1496. }
  1497. #else
  1498. static inline int inactive_anon_is_low(struct lruvec *lruvec)
  1499. {
  1500. return 0;
  1501. }
  1502. #endif
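/*
 * Illustrative sketch (not part of vmscan.c): inactive_anon_is_low_global()
 * above asks whether inactive * inactive_ratio < active. The standalone
 * helper and the sample numbers below are hypothetical, chosen only to make
 * the ratio concrete.
 */
static int example_inactive_anon_is_low(unsigned long active,
					unsigned long inactive,
					unsigned long inactive_ratio)
{
	return inactive * inactive_ratio < active;
}
/*
 * With a hypothetical inactive_ratio of 3, active = 900 and inactive = 100:
 * 100 * 3 = 300 < 900, so some active anon pages should be deactivated.
 */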
  1503. /**
  1504. * inactive_file_is_low - check if file pages need to be deactivated
  1505. * @lruvec: LRU vector to check
  1506. *
  1507. * When the system is doing streaming IO, memory pressure here
  1508. * ensures that active file pages get deactivated, until more
  1509. * than half of the file pages are on the inactive list.
  1510. *
  1511. * Once we get to that situation, protect the system's working
  1512. * set from being evicted by disabling active file page aging.
  1513. *
  1514. * This uses a different ratio than the anonymous pages, because
  1515. * the page cache uses a use-once replacement algorithm.
  1516. */
  1517. static int inactive_file_is_low(struct lruvec *lruvec)
  1518. {
  1519. unsigned long inactive;
  1520. unsigned long active;
  1521. inactive = get_lru_size(lruvec, LRU_INACTIVE_FILE);
  1522. active = get_lru_size(lruvec, LRU_ACTIVE_FILE);
  1523. return active > inactive;
  1524. }
  1525. static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
  1526. {
  1527. if (is_file_lru(lru))
  1528. return inactive_file_is_low(lruvec);
  1529. else
  1530. return inactive_anon_is_low(lruvec);
  1531. }
  1532. static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
  1533. struct lruvec *lruvec, struct scan_control *sc)
  1534. {
  1535. if (is_active_lru(lru)) {
  1536. if (inactive_list_is_low(lruvec, lru))
  1537. shrink_active_list(nr_to_scan, lruvec, sc, lru);
  1538. return 0;
  1539. }
  1540. return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
  1541. }
  1542. static int vmscan_swappiness(struct scan_control *sc)
  1543. {
  1544. if (global_reclaim(sc))
  1545. return vm_swappiness;
  1546. return mem_cgroup_swappiness(sc->target_mem_cgroup);
  1547. }
  1548. enum scan_balance {
  1549. SCAN_EQUAL,
  1550. SCAN_FRACT,
  1551. SCAN_ANON,
  1552. SCAN_FILE,
  1553. };
  1554. /*
  1555. * Determine how aggressively the anon and file LRU lists should be
  1556. * scanned. The relative value of each set of LRU lists is determined
  1557. * by looking at the fraction of the pages scanned we did rotate back
  1558. * onto the active list instead of evict.
  1559. *
  1560. * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
  1561. * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
  1562. */
  1563. static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
  1564. unsigned long *nr)
  1565. {
  1566. struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
  1567. u64 fraction[2];
  1568. u64 denominator = 0; /* gcc */
  1569. struct zone *zone = lruvec_zone(lruvec);
  1570. unsigned long anon_prio, file_prio;
  1571. enum scan_balance scan_balance;
  1572. unsigned long anon, file, free;
  1573. bool force_scan = false;
  1574. unsigned long ap, fp;
  1575. enum lru_list lru;
  1576. /*
  1577. * If the zone or memcg is small, nr[l] can be 0. This
  1578. * results in no scanning on this priority and a potential
  1579. * priority drop. Global direct reclaim can go to the next
  1580. * zone and tends to have no problems. Global kswapd is for
  1581. * zone balancing and it needs to scan a minimum amount. When
  1582. * reclaiming for a memcg, a priority drop can cause high
  1583. * latencies, so it's better to scan a minimum amount there as
  1584. * well.
  1585. */
  1586. if (current_is_kswapd() && zone->all_unreclaimable)
  1587. force_scan = true;
  1588. if (!global_reclaim(sc))
  1589. force_scan = true;
  1590. /* If we have no swap space, do not bother scanning anon pages. */
  1591. if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
  1592. scan_balance = SCAN_FILE;
  1593. goto out;
  1594. }
  1595. /*
  1596. * Global reclaim will swap to prevent OOM even with no
  1597. * swappiness, but memcg users want to use this knob to
  1598. * disable swapping for individual groups completely when
  1599. * using the memory controller's swap limit feature would be
  1600. * too expensive.
  1601. */
  1602. if (!global_reclaim(sc) && !vmscan_swappiness(sc)) {
  1603. scan_balance = SCAN_FILE;
  1604. goto out;
  1605. }
  1606. /*
  1607. * Do not apply any pressure balancing cleverness when the
  1608. * system is close to OOM, scan both anon and file equally
  1609. * (unless the swappiness setting disagrees with swapping).
  1610. */
  1611. if (!sc->priority && vmscan_swappiness(sc)) {
  1612. scan_balance = SCAN_EQUAL;
  1613. goto out;
  1614. }
  1615. anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
  1616. get_lru_size(lruvec, LRU_INACTIVE_ANON);
  1617. file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
  1618. get_lru_size(lruvec, LRU_INACTIVE_FILE);
  1619. /*
  1620. * If it's foreseeable that reclaiming the file cache won't be
  1621. * enough to get the zone back into a desirable shape, we have
  1622. * to swap. Better start now and leave the - probably heavily
  1623. * thrashing - remaining file pages alone.
  1624. */
  1625. if (global_reclaim(sc)) {
  1626. free = zone_page_state(zone, NR_FREE_PAGES);
  1627. if (unlikely(file + free <= high_wmark_pages(zone))) {
  1628. scan_balance = SCAN_ANON;
  1629. goto out;
  1630. }
  1631. }
  1632. /*
  1633. * There is enough inactive page cache, do not reclaim
  1634. * anything from the anonymous working set right now.
  1635. */
  1636. if (!inactive_file_is_low(lruvec)) {
  1637. scan_balance = SCAN_FILE;
  1638. goto out;
  1639. }
  1640. scan_balance = SCAN_FRACT;
  1641. /*
  1642. * With swappiness at 100, anonymous and file have the same priority.
  1643. * This scanning priority is essentially the inverse of IO cost.
  1644. */
  1645. anon_prio = vmscan_swappiness(sc);
  1646. file_prio = 200 - anon_prio;
  1647. /*
  1648. * OK, so we have swap space and a fair amount of page cache
  1649. * pages. We use the recently rotated / recently scanned
  1650. * ratios to determine how valuable each cache is.
  1651. *
  1652. * Because workloads change over time (and to avoid overflow)
  1653. * we keep these statistics as a floating average, which ends
  1654. * up weighing recent references more than old ones.
  1655. *
  1656. * anon in [0], file in [1]
  1657. */
  1658. spin_lock_irq(&zone->lru_lock);
  1659. if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
  1660. reclaim_stat->recent_scanned[0] /= 2;
  1661. reclaim_stat->recent_rotated[0] /= 2;
  1662. }
  1663. if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
  1664. reclaim_stat->recent_scanned[1] /= 2;
  1665. reclaim_stat->recent_rotated[1] /= 2;
  1666. }
  1667. /*
  1668. * The amount of pressure on anon vs file pages is inversely
  1669. * proportional to the fraction of recently scanned pages on
  1670. * each list that were recently referenced and in active use.
  1671. */
  1672. ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
  1673. ap /= reclaim_stat->recent_rotated[0] + 1;
  1674. fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
  1675. fp /= reclaim_stat->recent_rotated[1] + 1;
  1676. spin_unlock_irq(&zone->lru_lock);
  1677. fraction[0] = ap;
  1678. fraction[1] = fp;
  1679. denominator = ap + fp + 1;
  1680. out:
  1681. for_each_evictable_lru(lru) {
  1682. int file = is_file_lru(lru);
  1683. unsigned long size;
  1684. unsigned long scan;
  1685. size = get_lru_size(lruvec, lru);
  1686. scan = size >> sc->priority;
  1687. if (!scan && force_scan)
  1688. scan = min(size, SWAP_CLUSTER_MAX);
  1689. switch (scan_balance) {
  1690. case SCAN_EQUAL:
  1691. /* Scan lists relative to size */
  1692. break;
  1693. case SCAN_FRACT:
  1694. /*
  1695. * Scan types proportional to swappiness and
  1696. * their relative recent reclaim efficiency.
  1697. */
  1698. scan = div64_u64(scan * fraction[file], denominator);
  1699. break;
  1700. case SCAN_FILE:
  1701. case SCAN_ANON:
  1702. /* Scan one type exclusively */
  1703. if ((scan_balance == SCAN_FILE) != file)
  1704. scan = 0;
  1705. break;
  1706. default:
  1707. /* Look ma, no brain */
  1708. BUG();
  1709. }
  1710. nr[lru] = scan;
  1711. }
  1712. }
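/*
 * Illustrative sketch (not part of vmscan.c): the SCAN_FRACT arithmetic above
 * weights each LRU pair by swappiness and by how often recently scanned pages
 * were rotated back instead of reclaimed. The helper below mirrors the ap/fp
 * formula; its name and parameters are made up for the example.
 */
static unsigned long example_scan_pressure(unsigned long prio, /* anon_prio or file_prio */
					   unsigned long recent_scanned,
					   unsigned long recent_rotated)
{
	return prio * (recent_scanned + 1) / (recent_rotated + 1);
}
/*
 * With swappiness 60 (anon_prio = 60, file_prio = 140), anon rotating 50 of
 * its last 100 scanned pages and file rotating 10 of 100:
 * ap = 60 * 101 / 51 ~= 118 and fp = 140 * 101 / 11 ~= 1285, so roughly 118
 * of every 1404 pages scanned under SCAN_FRACT come from the anon lists.
 */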
  1713. /*
  1714. * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
  1715. */
  1716. static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
  1717. {
  1718. unsigned long nr[NR_LRU_LISTS];
  1719. unsigned long targets[NR_LRU_LISTS];
  1720. unsigned long nr_to_scan;
  1721. enum lru_list lru;
  1722. unsigned long nr_reclaimed = 0;
  1723. unsigned long nr_to_reclaim = sc->nr_to_reclaim;
  1724. struct blk_plug plug;
  1725. bool scan_adjusted = false;
  1726. get_scan_count(lruvec, sc, nr);
  1727. /* Record the original scan target for proportional adjustments later */
  1728. memcpy(targets, nr, sizeof(nr));
  1729. blk_start_plug(&plug);
  1730. while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
  1731. nr[LRU_INACTIVE_FILE]) {
  1732. unsigned long nr_anon, nr_file, percentage;
  1733. unsigned long nr_scanned;
  1734. for_each_evictable_lru(lru) {
  1735. if (nr[lru]) {
  1736. nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
  1737. nr[lru] -= nr_to_scan;
  1738. nr_reclaimed += shrink_list(lru, nr_to_scan,
  1739. lruvec, sc);
  1740. }
  1741. }
  1742. if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
  1743. continue;
  1744. /*
  1745. * For global direct reclaim, reclaim only the number of pages
  1746. * requested. Less care is taken to scan proportionally as it
  1747. * is more important to minimise direct reclaim stall latency
  1748. * than it is to properly age the LRU lists.
  1749. */
  1750. if (global_reclaim(sc) && !current_is_kswapd())
  1751. break;
  1752. /*
  1753. * For kswapd and memcg, reclaim at least the number of pages
  1754. * requested. Ensure that the anon and file LRUs shrink
1755. * proportionally to what was requested by get_scan_count(). We
1756. * stop reclaiming one LRU and reduce the amount of scanning
  1757. * proportional to the original scan target.
  1758. */
  1759. nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
  1760. nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
  1761. if (nr_file > nr_anon) {
  1762. unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
  1763. targets[LRU_ACTIVE_ANON] + 1;
  1764. lru = LRU_BASE;
  1765. percentage = nr_anon * 100 / scan_target;
  1766. } else {
  1767. unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
  1768. targets[LRU_ACTIVE_FILE] + 1;
  1769. lru = LRU_FILE;
  1770. percentage = nr_file * 100 / scan_target;
  1771. }
1772. /* Stop scanning the smaller of the two LRUs */
  1773. nr[lru] = 0;
  1774. nr[lru + LRU_ACTIVE] = 0;
  1775. /*
  1776. * Recalculate the other LRU scan count based on its original
  1777. * scan target and the percentage scanning already complete
  1778. */
  1779. lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
  1780. nr_scanned = targets[lru] - nr[lru];
  1781. nr[lru] = targets[lru] * (100 - percentage) / 100;
  1782. nr[lru] -= min(nr[lru], nr_scanned);
  1783. lru += LRU_ACTIVE;
  1784. nr_scanned = targets[lru] - nr[lru];
  1785. nr[lru] = targets[lru] * (100 - percentage) / 100;
  1786. nr[lru] -= min(nr[lru], nr_scanned);
  1787. scan_adjusted = true;
  1788. }
  1789. blk_finish_plug(&plug);
  1790. sc->nr_reclaimed += nr_reclaimed;
  1791. /*
  1792. * Even if we did not try to evict anon pages at all, we want to
  1793. * rebalance the anon lru active/inactive ratio.
  1794. */
  1795. if (inactive_anon_is_low(lruvec))
  1796. shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
  1797. sc, LRU_ACTIVE_ANON);
  1798. throttle_vm_writeout(sc->gfp_mask);
  1799. }
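/*
 * Illustrative sketch (not part of vmscan.c): once nr_to_reclaim is met,
 * shrink_lruvec() stops scanning the smaller of the anon/file pair and trims
 * the other side's remaining target to the same completion fraction. The
 * helper below restates that recalculation; percent_complete corresponds to
 * (100 - percentage) in the code above and the function name is hypothetical.
 */
static unsigned long example_remaining_scan(unsigned long target,
					    unsigned long already_scanned,
					    unsigned long percent_complete)
{
	/* Cap the total scan at the same completion fraction ... */
	unsigned long capped_total = target * percent_complete / 100;

	/* ... and subtract what has already been scanned. */
	return capped_total > already_scanned ?
	       capped_total - already_scanned : 0;
}
/*
 * E.g. if the file side stopped 40% of the way through its target and the
 * anon side had a target of 1000 with 300 already scanned, the anon side is
 * left with 1000 * 40 / 100 - 300 = 100 more pages to scan.
 */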
  1800. /* Use reclaim/compaction for costly allocs or under memory pressure */
  1801. static bool in_reclaim_compaction(struct scan_control *sc)
  1802. {
  1803. if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
  1804. (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
  1805. sc->priority < DEF_PRIORITY - 2))
  1806. return true;
  1807. return false;
  1808. }
  1809. /*
  1810. * Reclaim/compaction is used for high-order allocation requests. It reclaims
  1811. * order-0 pages before compacting the zone. should_continue_reclaim() returns
  1812. * true if more pages should be reclaimed such that when the page allocator
  1813. * calls try_to_compact_zone() that it will have enough free pages to succeed.
  1814. * It will give up earlier than that if there is difficulty reclaiming pages.
  1815. */
  1816. static inline bool should_continue_reclaim(struct zone *zone,
  1817. unsigned long nr_reclaimed,
  1818. unsigned long nr_scanned,
  1819. struct scan_control *sc)
  1820. {
  1821. unsigned long pages_for_compaction;
  1822. unsigned long inactive_lru_pages;
  1823. /* If not in reclaim/compaction mode, stop */
  1824. if (!in_reclaim_compaction(sc))
  1825. return false;
  1826. /* Consider stopping depending on scan and reclaim activity */
  1827. if (sc->gfp_mask & __GFP_REPEAT) {
  1828. /*
  1829. * For __GFP_REPEAT allocations, stop reclaiming if the
  1830. * full LRU list has been scanned and we are still failing
  1831. * to reclaim pages. This full LRU scan is potentially
  1832. * expensive but a __GFP_REPEAT caller really wants to succeed
  1833. */
  1834. if (!nr_reclaimed && !nr_scanned)
  1835. return false;
  1836. } else {
  1837. /*
  1838. * For non-__GFP_REPEAT allocations which can presumably
  1839. * fail without consequence, stop if we failed to reclaim
  1840. * any pages from the last SWAP_CLUSTER_MAX number of
  1841. * pages that were scanned. This will return to the
1842. * caller faster, at the risk that reclaim/compaction and
1843. * the resulting allocation attempt fail.
  1844. */
  1845. if (!nr_reclaimed)
  1846. return false;
  1847. }
  1848. /*
  1849. * If we have not reclaimed enough pages for compaction and the
  1850. * inactive lists are large enough, continue reclaiming
  1851. */
  1852. pages_for_compaction = (2UL << sc->order);
  1853. inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
  1854. if (get_nr_swap_pages() > 0)
  1855. inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
  1856. if (sc->nr_reclaimed < pages_for_compaction &&
  1857. inactive_lru_pages > pages_for_compaction)
  1858. return true;
  1859. /* If compaction would go ahead or the allocation would succeed, stop */
  1860. switch (compaction_suitable(zone, sc->order)) {
  1861. case COMPACT_PARTIAL:
  1862. case COMPACT_CONTINUE:
  1863. return false;
  1864. default:
  1865. return true;
  1866. }
  1867. }
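/*
 * Illustrative sketch (not part of vmscan.c): the "enough pages for
 * compaction" test above, restated as a standalone predicate with a
 * hypothetical name.
 */
static int example_keep_reclaiming_for_compaction(unsigned long nr_reclaimed,
						  unsigned long inactive_lru_pages,
						  int order)
{
	unsigned long pages_for_compaction = 2UL << order;

	return nr_reclaimed < pages_for_compaction &&
	       inactive_lru_pages > pages_for_compaction;
}
/*
 * For a THP-sized request (order 9) this is 2UL << 9 = 1024 order-0 pages:
 * reclaim continues while fewer than 1024 pages have been reclaimed and the
 * inactive lists still hold more than 1024.
 */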
  1868. static void shrink_zone(struct zone *zone, struct scan_control *sc)
  1869. {
  1870. unsigned long nr_reclaimed, nr_scanned;
  1871. do {
  1872. struct mem_cgroup *root = sc->target_mem_cgroup;
  1873. struct mem_cgroup_reclaim_cookie reclaim = {
  1874. .zone = zone,
  1875. .priority = sc->priority,
  1876. };
  1877. struct mem_cgroup *memcg;
  1878. nr_reclaimed = sc->nr_reclaimed;
  1879. nr_scanned = sc->nr_scanned;
  1880. memcg = mem_cgroup_iter(root, NULL, &reclaim);
  1881. do {
  1882. struct lruvec *lruvec;
  1883. lruvec = mem_cgroup_zone_lruvec(zone, memcg);
  1884. shrink_lruvec(lruvec, sc);
  1885. /*
  1886. * Direct reclaim and kswapd have to scan all memory
  1887. * cgroups to fulfill the overall scan target for the
  1888. * zone.
  1889. *
  1890. * Limit reclaim, on the other hand, only cares about
  1891. * nr_to_reclaim pages to be reclaimed and it will
  1892. * retry with decreasing priority if one round over the
  1893. * whole hierarchy is not sufficient.
  1894. */
  1895. if (!global_reclaim(sc) &&
  1896. sc->nr_reclaimed >= sc->nr_to_reclaim) {
  1897. mem_cgroup_iter_break(root, memcg);
  1898. break;
  1899. }
  1900. memcg = mem_cgroup_iter(root, memcg, &reclaim);
  1901. } while (memcg);
  1902. vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
  1903. sc->nr_scanned - nr_scanned,
  1904. sc->nr_reclaimed - nr_reclaimed);
  1905. } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
  1906. sc->nr_scanned - nr_scanned, sc));
  1907. }
  1908. /* Returns true if compaction should go ahead for a high-order request */
  1909. static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
  1910. {
  1911. unsigned long balance_gap, watermark;
  1912. bool watermark_ok;
  1913. /* Do not consider compaction for orders reclaim is meant to satisfy */
  1914. if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
  1915. return false;
  1916. /*
  1917. * Compaction takes time to run and there are potentially other
  1918. * callers using the pages just freed. Continue reclaiming until
  1919. * there is a buffer of free pages available to give compaction
  1920. * a reasonable chance of completing and allocating the page
  1921. */
  1922. balance_gap = min(low_wmark_pages(zone),
  1923. (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
  1924. KSWAPD_ZONE_BALANCE_GAP_RATIO);
  1925. watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
  1926. watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
  1927. /*
  1928. * If compaction is deferred, reclaim up to a point where
  1929. * compaction will have a chance of success when re-enabled
  1930. */
  1931. if (compaction_deferred(zone, sc->order))
  1932. return watermark_ok;
  1933. /* If compaction is not ready to start, keep reclaiming */
  1934. if (!compaction_suitable(zone, sc->order))
  1935. return false;
  1936. return watermark_ok;
  1937. }
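/*
 * Illustrative sketch (not part of vmscan.c): the balance_gap/watermark
 * target used above. Per the comment in kswapd_shrink_zone() below, the gap
 * is the smaller of the low watermark and roughly 1% of the zone (assuming
 * KSWAPD_ZONE_BALANCE_GAP_RATIO is 100). The helper name is hypothetical.
 */
static unsigned long example_compaction_watermark(unsigned long managed_pages,
						  unsigned long low_wmark,
						  unsigned long high_wmark,
						  int order)
{
	unsigned long one_percent = (managed_pages + 99) / 100;
	unsigned long balance_gap = low_wmark < one_percent ?
				    low_wmark : one_percent;

	/*
	 * Keep reclaiming until this many pages are free, so compaction has
	 * headroom to allocate its migration targets.
	 */
	return high_wmark + balance_gap + (2UL << order);
}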
  1938. /*
  1939. * This is the direct reclaim path, for page-allocating processes. We only
  1940. * try to reclaim pages from zones which will satisfy the caller's allocation
  1941. * request.
  1942. *
  1943. * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
  1944. * Because:
  1945. * a) The caller may be trying to free *extra* pages to satisfy a higher-order
  1946. * allocation or
  1947. * b) The target zone may be at high_wmark_pages(zone) but the lower zones
  1948. * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
  1949. * zone defense algorithm.
  1950. *
  1951. * If a zone is deemed to be full of pinned pages then just give it a light
  1952. * scan then give up on it.
  1953. *
  1954. * This function returns true if a zone is being reclaimed for a costly
  1955. * high-order allocation and compaction is ready to begin. This indicates to
  1956. * the caller that it should consider retrying the allocation instead of
  1957. * further reclaim.
  1958. */
  1959. static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
  1960. {
  1961. struct zoneref *z;
  1962. struct zone *zone;
  1963. unsigned long nr_soft_reclaimed;
  1964. unsigned long nr_soft_scanned;
  1965. bool aborted_reclaim = false;
  1966. /*
  1967. * If the number of buffer_heads in the machine exceeds the maximum
  1968. * allowed level, force direct reclaim to scan the highmem zone as
  1969. * highmem pages could be pinning lowmem pages storing buffer_heads
  1970. */
  1971. if (buffer_heads_over_limit)
  1972. sc->gfp_mask |= __GFP_HIGHMEM;
  1973. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  1974. gfp_zone(sc->gfp_mask), sc->nodemask) {
  1975. if (!populated_zone(zone))
  1976. continue;
  1977. /*
1978. * Take care that memory-controller (memcg) reclaim has only a small
1979. * influence on the global LRU.
  1980. */
  1981. if (global_reclaim(sc)) {
  1982. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  1983. continue;
  1984. if (zone->all_unreclaimable &&
  1985. sc->priority != DEF_PRIORITY)
  1986. continue; /* Let kswapd poll it */
  1987. if (IS_ENABLED(CONFIG_COMPACTION)) {
  1988. /*
  1989. * If we already have plenty of memory free for
  1990. * compaction in this zone, don't free any more.
  1991. * Even though compaction is invoked for any
  1992. * non-zero order, only frequent costly order
  1993. * reclamation is disruptive enough to become a
  1994. * noticeable problem, like transparent huge
  1995. * page allocations.
  1996. */
  1997. if (compaction_ready(zone, sc)) {
  1998. aborted_reclaim = true;
  1999. continue;
  2000. }
  2001. }
  2002. /*
  2003. * This steals pages from memory cgroups over softlimit
  2004. * and returns the number of reclaimed pages and
  2005. * scanned pages. This works for global memory pressure
  2006. * and balancing, not for a memcg's limit.
  2007. */
  2008. nr_soft_scanned = 0;
  2009. nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
  2010. sc->order, sc->gfp_mask,
  2011. &nr_soft_scanned);
  2012. sc->nr_reclaimed += nr_soft_reclaimed;
  2013. sc->nr_scanned += nr_soft_scanned;
2014. /* need some check to avoid calling shrink_zone() more than necessary */
  2015. }
  2016. shrink_zone(zone, sc);
  2017. }
  2018. return aborted_reclaim;
  2019. }
  2020. static bool zone_reclaimable(struct zone *zone)
  2021. {
  2022. return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
  2023. }
  2024. /* All zones in zonelist are unreclaimable? */
  2025. static bool all_unreclaimable(struct zonelist *zonelist,
  2026. struct scan_control *sc)
  2027. {
  2028. struct zoneref *z;
  2029. struct zone *zone;
  2030. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  2031. gfp_zone(sc->gfp_mask), sc->nodemask) {
  2032. if (!populated_zone(zone))
  2033. continue;
  2034. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  2035. continue;
  2036. if (!zone->all_unreclaimable)
  2037. return false;
  2038. }
  2039. return true;
  2040. }
  2041. /*
  2042. * This is the main entry point to direct page reclaim.
  2043. *
  2044. * If a full scan of the inactive list fails to free enough memory then we
  2045. * are "out of memory" and something needs to be killed.
  2046. *
  2047. * If the caller is !__GFP_FS then the probability of a failure is reasonably
  2048. * high - the zone may be full of dirty or under-writeback pages, which this
  2049. * caller can't do much about. We kick the writeback threads and take explicit
  2050. * naps in the hope that some of these pages can be written. But if the
  2051. * allocating task holds filesystem locks which prevent writeout this might not
  2052. * work, and the allocation attempt will fail.
  2053. *
  2054. * returns: 0, if no pages reclaimed
  2055. * else, the number of pages reclaimed
  2056. */
  2057. static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
  2058. struct scan_control *sc,
  2059. struct shrink_control *shrink)
  2060. {
  2061. unsigned long total_scanned = 0;
  2062. struct reclaim_state *reclaim_state = current->reclaim_state;
  2063. struct zoneref *z;
  2064. struct zone *zone;
  2065. unsigned long writeback_threshold;
  2066. bool aborted_reclaim;
  2067. delayacct_freepages_start();
  2068. if (global_reclaim(sc))
  2069. count_vm_event(ALLOCSTALL);
  2070. do {
  2071. vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
  2072. sc->priority);
  2073. sc->nr_scanned = 0;
  2074. aborted_reclaim = shrink_zones(zonelist, sc);
  2075. /*
  2076. * Don't shrink slabs when reclaiming memory from over limit
  2077. * cgroups but do shrink slab at least once when aborting
  2078. * reclaim for compaction to avoid unevenly scanning file/anon
  2079. * LRU pages over slab pages.
  2080. */
  2081. if (global_reclaim(sc)) {
  2082. unsigned long lru_pages = 0;
  2083. for_each_zone_zonelist(zone, z, zonelist,
  2084. gfp_zone(sc->gfp_mask)) {
  2085. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  2086. continue;
  2087. lru_pages += zone_reclaimable_pages(zone);
  2088. }
  2089. shrink_slab(shrink, sc->nr_scanned, lru_pages);
  2090. if (reclaim_state) {
  2091. sc->nr_reclaimed += reclaim_state->reclaimed_slab;
  2092. reclaim_state->reclaimed_slab = 0;
  2093. }
  2094. }
  2095. total_scanned += sc->nr_scanned;
  2096. if (sc->nr_reclaimed >= sc->nr_to_reclaim)
  2097. goto out;
  2098. /*
2099. * If we're having trouble reclaiming, start doing
  2100. * writepage even in laptop mode.
  2101. */
  2102. if (sc->priority < DEF_PRIORITY - 2)
  2103. sc->may_writepage = 1;
  2104. /*
  2105. * Try to write back as many pages as we just scanned. This
  2106. * tends to cause slow streaming writers to write data to the
  2107. * disk smoothly, at the dirtying rate, which is nice. But
  2108. * that's undesirable in laptop mode, where we *want* lumpy
  2109. * writeout. So in laptop mode, write out the whole world.
  2110. */
  2111. writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
  2112. if (total_scanned > writeback_threshold) {
  2113. wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
  2114. WB_REASON_TRY_TO_FREE_PAGES);
  2115. sc->may_writepage = 1;
  2116. }
  2117. } while (--sc->priority >= 0 && !aborted_reclaim);
  2118. out:
  2119. delayacct_freepages_end();
  2120. if (sc->nr_reclaimed)
  2121. return sc->nr_reclaimed;
  2122. /*
2123. * While hibernation is in progress, kswapd is frozen so that it cannot mark
2124. * zones all_unreclaimable. Thus we bypass the all_unreclaimable
2125. * check.
  2126. */
  2127. if (oom_killer_disabled)
  2128. return 0;
  2129. /* Aborted reclaim to try compaction? don't OOM, then */
  2130. if (aborted_reclaim)
  2131. return 1;
  2132. /* top priority shrink_zones still had more to do? don't OOM, then */
  2133. if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
  2134. return 1;
  2135. return 0;
  2136. }
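/*
 * Illustrative sketch (not part of vmscan.c): the flusher-wakeup threshold
 * used above. Once total_scanned exceeds 1.5x the reclaim target, the
 * writeback threads are kicked. The helper name is hypothetical.
 */
static int example_should_wake_flusher_threads(unsigned long total_scanned,
					       unsigned long nr_to_reclaim)
{
	unsigned long writeback_threshold = nr_to_reclaim + nr_to_reclaim / 2;

	return total_scanned > writeback_threshold;
}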
  2137. static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
  2138. {
  2139. struct zone *zone;
  2140. unsigned long pfmemalloc_reserve = 0;
  2141. unsigned long free_pages = 0;
  2142. int i;
  2143. bool wmark_ok;
  2144. for (i = 0; i <= ZONE_NORMAL; i++) {
  2145. zone = &pgdat->node_zones[i];
  2146. pfmemalloc_reserve += min_wmark_pages(zone);
  2147. free_pages += zone_page_state(zone, NR_FREE_PAGES);
  2148. }
  2149. wmark_ok = free_pages > pfmemalloc_reserve / 2;
  2150. /* kswapd must be awake if processes are being throttled */
  2151. if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
  2152. pgdat->classzone_idx = min(pgdat->classzone_idx,
  2153. (enum zone_type)ZONE_NORMAL);
  2154. wake_up_interruptible(&pgdat->kswapd_wait);
  2155. }
  2156. return wmark_ok;
  2157. }
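/*
 * Illustrative sketch (not part of vmscan.c): the PFMEMALLOC throttling check
 * above, restated over plain arrays. Direct reclaimers are throttled once
 * free pages in the lowmem zones fall to half of their summed min watermarks.
 * The helper name and array parameters are hypothetical.
 */
static int example_pfmemalloc_watermark_ok(const unsigned long *min_wmark,
					   const unsigned long *nr_free,
					   int nr_lowmem_zones)
{
	unsigned long pfmemalloc_reserve = 0;
	unsigned long free_pages = 0;
	int i;

	for (i = 0; i < nr_lowmem_zones; i++) {
		pfmemalloc_reserve += min_wmark[i];
		free_pages += nr_free[i];
	}
	return free_pages > pfmemalloc_reserve / 2;
}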
  2158. /*
  2159. * Throttle direct reclaimers if backing storage is backed by the network
  2160. * and the PFMEMALLOC reserve for the preferred node is getting dangerously
  2161. * depleted. kswapd will continue to make progress and wake the processes
  2162. * when the low watermark is reached.
  2163. *
  2164. * Returns true if a fatal signal was delivered during throttling. If this
  2165. * happens, the page allocator should not consider triggering the OOM killer.
  2166. */
  2167. static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
  2168. nodemask_t *nodemask)
  2169. {
  2170. struct zone *zone;
  2171. int high_zoneidx = gfp_zone(gfp_mask);
  2172. pg_data_t *pgdat;
  2173. /*
  2174. * Kernel threads should not be throttled as they may be indirectly
  2175. * responsible for cleaning pages necessary for reclaim to make forward
  2176. * progress. kjournald for example may enter direct reclaim while
2177. * committing a transaction, where throttling it could force other
  2178. * processes to block on log_wait_commit().
  2179. */
  2180. if (current->flags & PF_KTHREAD)
  2181. goto out;
  2182. /*
  2183. * If a fatal signal is pending, this process should not throttle.
  2184. * It should return quickly so it can exit and free its memory
  2185. */
  2186. if (fatal_signal_pending(current))
  2187. goto out;
  2188. /* Check if the pfmemalloc reserves are ok */
  2189. first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
  2190. pgdat = zone->zone_pgdat;
  2191. if (pfmemalloc_watermark_ok(pgdat))
  2192. goto out;
  2193. /* Account for the throttling */
  2194. count_vm_event(PGSCAN_DIRECT_THROTTLE);
  2195. /*
  2196. * If the caller cannot enter the filesystem, it's possible that it
  2197. * is due to the caller holding an FS lock or performing a journal
  2198. * transaction in the case of a filesystem like ext[3|4]. In this case,
  2199. * it is not safe to block on pfmemalloc_wait as kswapd could be
  2200. * blocked waiting on the same lock. Instead, throttle for up to a
  2201. * second before continuing.
  2202. */
  2203. if (!(gfp_mask & __GFP_FS)) {
  2204. wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
  2205. pfmemalloc_watermark_ok(pgdat), HZ);
  2206. goto check_pending;
  2207. }
  2208. /* Throttle until kswapd wakes the process */
  2209. wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
  2210. pfmemalloc_watermark_ok(pgdat));
  2211. check_pending:
  2212. if (fatal_signal_pending(current))
  2213. return true;
  2214. out:
  2215. return false;
  2216. }
  2217. unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
  2218. gfp_t gfp_mask, nodemask_t *nodemask)
  2219. {
  2220. unsigned long nr_reclaimed;
  2221. struct scan_control sc = {
  2222. .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
  2223. .may_writepage = !laptop_mode,
  2224. .nr_to_reclaim = SWAP_CLUSTER_MAX,
  2225. .may_unmap = 1,
  2226. .may_swap = 1,
  2227. .order = order,
  2228. .priority = DEF_PRIORITY,
  2229. .target_mem_cgroup = NULL,
  2230. .nodemask = nodemask,
  2231. };
  2232. struct shrink_control shrink = {
  2233. .gfp_mask = sc.gfp_mask,
  2234. };
  2235. /*
  2236. * Do not enter reclaim if fatal signal was delivered while throttled.
  2237. * 1 is returned so that the page allocator does not OOM kill at this
  2238. * point.
  2239. */
  2240. if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
  2241. return 1;
  2242. trace_mm_vmscan_direct_reclaim_begin(order,
  2243. sc.may_writepage,
  2244. gfp_mask);
  2245. nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
  2246. trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
  2247. return nr_reclaimed;
  2248. }
  2249. #ifdef CONFIG_MEMCG
  2250. unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
  2251. gfp_t gfp_mask, bool noswap,
  2252. struct zone *zone,
  2253. unsigned long *nr_scanned)
  2254. {
  2255. struct scan_control sc = {
  2256. .nr_scanned = 0,
  2257. .nr_to_reclaim = SWAP_CLUSTER_MAX,
  2258. .may_writepage = !laptop_mode,
  2259. .may_unmap = 1,
  2260. .may_swap = !noswap,
  2261. .order = 0,
  2262. .priority = 0,
  2263. .target_mem_cgroup = memcg,
  2264. };
  2265. struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
  2266. sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
  2267. (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
  2268. trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
  2269. sc.may_writepage,
  2270. sc.gfp_mask);
  2271. /*
  2272. * NOTE: Although we can get the priority field, using it
  2273. * here is not a good idea, since it limits the pages we can scan.
  2274. * if we don't reclaim here, the shrink_zone from balance_pgdat
  2275. * will pick up pages from other mem cgroup's as well. We hack
  2276. * the priority and make it zero.
  2277. */
  2278. shrink_lruvec(lruvec, &sc);
  2279. trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
  2280. *nr_scanned = sc.nr_scanned;
  2281. return sc.nr_reclaimed;
  2282. }
  2283. unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
  2284. gfp_t gfp_mask,
  2285. bool noswap)
  2286. {
  2287. struct zonelist *zonelist;
  2288. unsigned long nr_reclaimed;
  2289. int nid;
  2290. struct scan_control sc = {
  2291. .may_writepage = !laptop_mode,
  2292. .may_unmap = 1,
  2293. .may_swap = !noswap,
  2294. .nr_to_reclaim = SWAP_CLUSTER_MAX,
  2295. .order = 0,
  2296. .priority = DEF_PRIORITY,
  2297. .target_mem_cgroup = memcg,
2298. .nodemask = NULL, /* we don't care about placement */
  2299. .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
  2300. (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
  2301. };
  2302. struct shrink_control shrink = {
  2303. .gfp_mask = sc.gfp_mask,
  2304. };
  2305. /*
2306. * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2307. * care where the pages come from, so the node where we start the
2308. * scan does not need to be the current node.
  2309. */
  2310. nid = mem_cgroup_select_victim_node(memcg);
  2311. zonelist = NODE_DATA(nid)->node_zonelists;
  2312. trace_mm_vmscan_memcg_reclaim_begin(0,
  2313. sc.may_writepage,
  2314. sc.gfp_mask);
  2315. nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
  2316. trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
  2317. return nr_reclaimed;
  2318. }
  2319. #endif
  2320. static void age_active_anon(struct zone *zone, struct scan_control *sc)
  2321. {
  2322. struct mem_cgroup *memcg;
  2323. if (!total_swap_pages)
  2324. return;
  2325. memcg = mem_cgroup_iter(NULL, NULL, NULL);
  2326. do {
  2327. struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
  2328. if (inactive_anon_is_low(lruvec))
  2329. shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
  2330. sc, LRU_ACTIVE_ANON);
  2331. memcg = mem_cgroup_iter(NULL, memcg, NULL);
  2332. } while (memcg);
  2333. }
  2334. static bool zone_balanced(struct zone *zone, int order,
  2335. unsigned long balance_gap, int classzone_idx)
  2336. {
  2337. if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
  2338. balance_gap, classzone_idx, 0))
  2339. return false;
  2340. if (IS_ENABLED(CONFIG_COMPACTION) && order &&
  2341. !compaction_suitable(zone, order))
  2342. return false;
  2343. return true;
  2344. }
  2345. /*
  2346. * pgdat_balanced() is used when checking if a node is balanced.
  2347. *
  2348. * For order-0, all zones must be balanced!
  2349. *
  2350. * For high-order allocations only zones that meet watermarks and are in a
2351. * zone allowed by the caller's classzone_idx are added to balanced_pages. The
  2352. * total of balanced pages must be at least 25% of the zones allowed by
  2353. * classzone_idx for the node to be considered balanced. Forcing all zones to
  2354. * be balanced for high orders can cause excessive reclaim when there are
  2355. * imbalanced zones.
  2356. * The choice of 25% is due to
  2357. * o a 16M DMA zone that is balanced will not balance a zone on any
2358. * reasonably sized machine
  2359. * o On all other machines, the top zone must be at least a reasonable
  2360. * percentage of the middle zones. For example, on 32-bit x86, highmem
2361. * would need to be at least 256M for it to be able to balance a whole node.
  2362. * Similarly, on x86-64 the Normal zone would need to be at least 1G
  2363. * to balance a node on its own. These seemed like reasonable ratios.
  2364. */
  2365. static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
  2366. {
  2367. unsigned long managed_pages = 0;
  2368. unsigned long balanced_pages = 0;
  2369. int i;
  2370. /* Check the watermark levels */
  2371. for (i = 0; i <= classzone_idx; i++) {
  2372. struct zone *zone = pgdat->node_zones + i;
  2373. if (!populated_zone(zone))
  2374. continue;
  2375. managed_pages += zone->managed_pages;
  2376. /*
  2377. * A special case here:
  2378. *
  2379. * balance_pgdat() skips over all_unreclaimable after
  2380. * DEF_PRIORITY. Effectively, it considers them balanced so
  2381. * they must be considered balanced here as well!
  2382. */
  2383. if (zone->all_unreclaimable) {
  2384. balanced_pages += zone->managed_pages;
  2385. continue;
  2386. }
  2387. if (zone_balanced(zone, order, 0, i))
  2388. balanced_pages += zone->managed_pages;
  2389. else if (!order)
  2390. return false;
  2391. }
  2392. if (order)
  2393. return balanced_pages >= (managed_pages >> 2);
  2394. else
  2395. return true;
  2396. }
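/*
 * Illustrative sketch (not part of vmscan.c): the final balance decision
 * above, separated from the per-zone loop. For order-0 every populated zone
 * must already have passed zone_balanced() (the loop returns false
 * otherwise); for high-order wakeups it is enough that zones holding at
 * least 25% of the managed pages are balanced. The helper name is
 * hypothetical.
 */
static int example_pgdat_balanced(unsigned long balanced_pages,
				  unsigned long managed_pages,
				  int order)
{
	if (order)
		return balanced_pages >= (managed_pages >> 2);
	return 1;
}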
  2397. /*
  2398. * Prepare kswapd for sleeping. This verifies that there are no processes
  2399. * waiting in throttle_direct_reclaim() and that watermarks have been met.
  2400. *
  2401. * Returns true if kswapd is ready to sleep
  2402. */
  2403. static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
  2404. int classzone_idx)
  2405. {
  2406. /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
  2407. if (remaining)
  2408. return false;
  2409. /*
  2410. * There is a potential race between when kswapd checks its watermarks
  2411. * and a process gets throttled. There is also a potential race if
2412. * processes get throttled, kswapd wakes, a large process exits thereby
2413. * balancing the zones, which causes kswapd to miss a wakeup. If kswapd
  2414. * is going to sleep, no process should be sleeping on pfmemalloc_wait
  2415. * so wake them now if necessary. If necessary, processes will wake
  2416. * kswapd and get throttled again
  2417. */
  2418. if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
  2419. wake_up(&pgdat->pfmemalloc_wait);
  2420. return false;
  2421. }
  2422. return pgdat_balanced(pgdat, order, classzone_idx);
  2423. }
  2424. /*
  2425. * kswapd shrinks the zone by the number of pages required to reach
  2426. * the high watermark.
  2427. *
  2428. * Returns true if kswapd scanned at least the requested number of pages to
  2429. * reclaim or if the lack of progress was due to pages under writeback.
  2430. * This is used to determine if the scanning priority needs to be raised.
  2431. */
  2432. static bool kswapd_shrink_zone(struct zone *zone,
  2433. int classzone_idx,
  2434. struct scan_control *sc,
  2435. unsigned long lru_pages,
  2436. unsigned long *nr_attempted)
  2437. {
  2438. unsigned long nr_slab;
  2439. int testorder = sc->order;
  2440. unsigned long balance_gap;
  2441. struct reclaim_state *reclaim_state = current->reclaim_state;
  2442. struct shrink_control shrink = {
  2443. .gfp_mask = sc->gfp_mask,
  2444. };
  2445. bool lowmem_pressure;
  2446. /* Reclaim above the high watermark. */
  2447. sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
  2448. /*
  2449. * Kswapd reclaims only single pages with compaction enabled. Trying
  2450. * too hard to reclaim until contiguous free pages have become
  2451. * available can hurt performance by evicting too much useful data
  2452. * from memory. Do not reclaim more than needed for compaction.
  2453. */
  2454. if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
  2455. compaction_suitable(zone, sc->order) !=
  2456. COMPACT_SKIPPED)
  2457. testorder = 0;
  2458. /*
  2459. * We put equal pressure on every zone, unless one zone has way too
  2460. * many pages free already. The "too many pages" is defined as the
  2461. * high wmark plus a "gap" where the gap is either the low
  2462. * watermark or 1% of the zone, whichever is smaller.
  2463. */
  2464. balance_gap = min(low_wmark_pages(zone),
  2465. (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
  2466. KSWAPD_ZONE_BALANCE_GAP_RATIO);
  2467. /*
  2468. * If there is no low memory pressure or the zone is balanced then no
  2469. * reclaim is necessary
  2470. */
  2471. lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
  2472. if (!lowmem_pressure && zone_balanced(zone, testorder,
  2473. balance_gap, classzone_idx))
  2474. return true;
  2475. shrink_zone(zone, sc);
  2476. reclaim_state->reclaimed_slab = 0;
  2477. nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
  2478. sc->nr_reclaimed += reclaim_state->reclaimed_slab;
  2479. /* Account for the number of pages attempted to reclaim */
  2480. *nr_attempted += sc->nr_to_reclaim;
  2481. if (nr_slab == 0 && !zone_reclaimable(zone))
  2482. zone->all_unreclaimable = 1;
  2483. zone_clear_flag(zone, ZONE_WRITEBACK);
  2484. /*
  2485. * If a zone reaches its high watermark, consider it to be no longer
  2486. * congested. It's possible there are dirty pages backed by congested
  2487. * BDIs but as pressure is relieved, speculatively avoid congestion
  2488. * waits.
  2489. */
  2490. if (!zone->all_unreclaimable &&
  2491. zone_balanced(zone, testorder, 0, classzone_idx)) {
  2492. zone_clear_flag(zone, ZONE_CONGESTED);
  2493. zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
  2494. }
  2495. return sc->nr_scanned >= sc->nr_to_reclaim;
  2496. }
  2497. /*
  2498. * For kswapd, balance_pgdat() will work across all this node's zones until
  2499. * they are all at high_wmark_pages(zone).
  2500. *
  2501. * Returns the final order kswapd was reclaiming at
  2502. *
  2503. * There is special handling here for zones which are full of pinned pages.
  2504. * This can happen if the pages are all mlocked, or if they are all used by
  2505. * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
  2506. * What we do is to detect the case where all pages in the zone have been
  2507. * scanned twice and there has been zero successful reclaim. Mark the zone as
  2508. * dead and from now on, only perform a short scan. Basically we're polling
  2509. * the zone for when the problem goes away.
  2510. *
  2511. * kswapd scans the zones in the highmem->normal->dma direction. It skips
  2512. * zones which have free_pages > high_wmark_pages(zone), but once a zone is
  2513. * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
  2514. * lower zones regardless of the number of free pages in the lower zones. This
  2515. * interoperates with the page allocator fallback scheme to ensure that aging
  2516. * of pages is balanced across the zones.
  2517. */
  2518. static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
  2519. int *classzone_idx)
  2520. {
  2521. int i;
  2522. int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
  2523. unsigned long nr_soft_reclaimed;
  2524. unsigned long nr_soft_scanned;
  2525. struct scan_control sc = {
  2526. .gfp_mask = GFP_KERNEL,
  2527. .priority = DEF_PRIORITY,
  2528. .may_unmap = 1,
  2529. .may_swap = 1,
  2530. .may_writepage = !laptop_mode,
  2531. .order = order,
  2532. .target_mem_cgroup = NULL,
  2533. };
  2534. count_vm_event(PAGEOUTRUN);
  2535. do {
  2536. unsigned long lru_pages = 0;
  2537. unsigned long nr_attempted = 0;
  2538. bool raise_priority = true;
  2539. bool pgdat_needs_compaction = (order > 0);
  2540. sc.nr_reclaimed = 0;
  2541. /*
  2542. * Scan in the highmem->dma direction for the highest
  2543. * zone which needs scanning
  2544. */
  2545. for (i = pgdat->nr_zones - 1; i >= 0; i--) {
  2546. struct zone *zone = pgdat->node_zones + i;
  2547. if (!populated_zone(zone))
  2548. continue;
  2549. if (zone->all_unreclaimable &&
  2550. sc.priority != DEF_PRIORITY)
  2551. continue;
  2552. /*
  2553. * Do some background aging of the anon list, to give
  2554. * pages a chance to be referenced before reclaiming.
  2555. */
  2556. age_active_anon(zone, &sc);
  2557. /*
  2558. * If the number of buffer_heads in the machine
  2559. * exceeds the maximum allowed level and this node
  2560. * has a highmem zone, force kswapd to reclaim from
  2561. * it to relieve lowmem pressure.
  2562. */
  2563. if (buffer_heads_over_limit && is_highmem_idx(i)) {
  2564. end_zone = i;
  2565. break;
  2566. }
  2567. if (!zone_balanced(zone, order, 0, 0)) {
  2568. end_zone = i;
  2569. break;
  2570. } else {
  2571. /*
  2572. * If balanced, clear the dirty and congested
  2573. * flags
  2574. */
  2575. zone_clear_flag(zone, ZONE_CONGESTED);
  2576. zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
  2577. }
  2578. }
  2579. if (i < 0)
  2580. goto out;
  2581. for (i = 0; i <= end_zone; i++) {
  2582. struct zone *zone = pgdat->node_zones + i;
  2583. if (!populated_zone(zone))
  2584. continue;
  2585. lru_pages += zone_reclaimable_pages(zone);
  2586. /*
  2587. * If any zone is currently balanced then kswapd will
  2588. * not call compaction as it is expected that the
  2589. * necessary pages are already available.
  2590. */
  2591. if (pgdat_needs_compaction &&
  2592. zone_watermark_ok(zone, order,
  2593. low_wmark_pages(zone),
  2594. *classzone_idx, 0))
  2595. pgdat_needs_compaction = false;
  2596. }
  2597. /*
2598. * If we're having trouble reclaiming, start doing writepage
  2599. * even in laptop mode.
  2600. */
  2601. if (sc.priority < DEF_PRIORITY - 2)
  2602. sc.may_writepage = 1;
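/*
 * Illustrative numbers for the check above: with the usual DEF_PRIORITY of
 * 12, writepage is enabled once sc.priority has dropped to 9, i.e. once
 * three passes of the loop have each triggered a priority increase
 * (sc.priority 12 -> 11 -> 10 -> 9).
 */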
  2603. /*
  2604. * Now scan the zone in the dma->highmem direction, stopping
  2605. * at the last zone which needs scanning.
  2606. *
  2607. * We do this because the page allocator works in the opposite
  2608. * direction. This prevents the page allocator from allocating
  2609. * pages behind kswapd's direction of progress, which would
  2610. * cause too much scanning of the lower zones.
  2611. */
  2612. for (i = 0; i <= end_zone; i++) {
  2613. struct zone *zone = pgdat->node_zones + i;
  2614. if (!populated_zone(zone))
  2615. continue;
  2616. if (zone->all_unreclaimable &&
  2617. sc.priority != DEF_PRIORITY)
  2618. continue;
  2619. sc.nr_scanned = 0;
  2620. nr_soft_scanned = 0;
  2621. /*
  2622. * Call soft limit reclaim before calling shrink_zone.
  2623. */
  2624. nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
  2625. order, sc.gfp_mask,
  2626. &nr_soft_scanned);
  2627. sc.nr_reclaimed += nr_soft_reclaimed;
  2628. /*
  2629. * There should be no need to raise the scanning
  2630. * priority if enough pages are already being scanned
2631. * that the high watermark would be met at 100%
  2632. * efficiency.
  2633. */
  2634. if (kswapd_shrink_zone(zone, end_zone, &sc,
  2635. lru_pages, &nr_attempted))
  2636. raise_priority = false;
  2637. }
  2638. /*
  2639. * If the low watermark is met there is no need for processes
2640. * to be throttled on pfmemalloc_wait as they should now be
2641. * able to safely make forward progress. Wake them.
  2642. */
  2643. if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
  2644. pfmemalloc_watermark_ok(pgdat))
  2645. wake_up(&pgdat->pfmemalloc_wait);
  2646. /*
  2647. * Fragmentation may mean that the system cannot be rebalanced
  2648. * for high-order allocations in all zones. If twice the
  2649. * allocation size has been reclaimed and the zones are still
  2650. * not balanced then recheck the watermarks at order-0 to
2651. * prevent kswapd reclaiming excessively. Assume that a process
2652. * that requested a high-order allocation can direct reclaim/compact itself.
  2653. */
  2654. if (order && sc.nr_reclaimed >= 2UL << order)
  2655. order = sc.order = 0;
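/*
 * Worked example for the check above: an order-3 wakeup makes 2UL << order
 * equal to 16 pages, twice the 8-page allocation.  Once that much has been
 * reclaimed, kswapd falls back to order-0 balancing on the assumption that
 * the caller can now direct reclaim/compact for itself.
 */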
  2656. /* Check if kswapd should be suspending */
  2657. if (try_to_freeze() || kthread_should_stop())
  2658. break;
  2659. /*
  2660. * Compact if necessary and kswapd is reclaiming at least the
2661. * high watermark number of pages as requested
  2662. */
  2663. if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
  2664. compact_pgdat(pgdat, order);
  2665. /*
  2666. * Raise priority if scanning rate is too low or there was no
  2667. * progress in reclaiming pages
  2668. */
  2669. if (raise_priority || !sc.nr_reclaimed)
  2670. sc.priority--;
  2671. } while (sc.priority >= 1 &&
  2672. !pgdat_balanced(pgdat, order, *classzone_idx));
  2673. out:
  2674. /*
2675. * Return the order we were reclaiming at so prepare_kswapd_sleep()
2676. * can base its decision on it. However, if another caller entered the
2677. * allocator slow path while kswapd was awake, order will remain at
2678. * the higher level.
  2679. */
  2680. *classzone_idx = end_zone;
  2681. return order;
  2682. }
  2683. static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
  2684. {
  2685. long remaining = 0;
  2686. DEFINE_WAIT(wait);
  2687. if (freezing(current) || kthread_should_stop())
  2688. return;
  2689. prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
  2690. /* Try to sleep for a short interval */
  2691. if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
  2692. remaining = schedule_timeout(HZ/10);
  2693. finish_wait(&pgdat->kswapd_wait, &wait);
  2694. prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
  2695. }
  2696. /*
  2697. * After a short sleep, check if it was a premature sleep. If not, then
  2698. * go fully to sleep until explicitly woken up.
  2699. */
  2700. if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
  2701. trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
  2702. /*
  2703. * vmstat counters are not perfectly accurate and the estimated
  2704. * value for counters such as NR_FREE_PAGES can deviate from the
  2705. * true value by nr_online_cpus * threshold. To avoid the zone
  2706. * watermarks being breached while under pressure, we reduce the
  2707. * per-cpu vmstat threshold while kswapd is awake and restore
  2708. * them before going back to sleep.
  2709. */
  2710. set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
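/*
 * Illustrative magnitude (hypothetical numbers): with 8 online CPUs and a
 * per-cpu threshold of 32, a counter such as NR_FREE_PAGES could be off by
 * up to 8 * 32 = 256 pages, which is why a smaller "pressure" threshold is
 * used while kswapd is awake.
 */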
  2711. /*
  2712. * Compaction records what page blocks it recently failed to
2713. * isolate pages from and skips them in future scans.
2714. * When kswapd is going to sleep, it is reasonable to assume that
2715. * compaction may succeed again, so reset the cached skip information.
  2716. */
  2717. reset_isolation_suitable(pgdat);
  2718. if (!kthread_should_stop())
  2719. schedule();
  2720. set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
  2721. } else {
  2722. if (remaining)
  2723. count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
  2724. else
  2725. count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
  2726. }
  2727. finish_wait(&pgdat->kswapd_wait, &wait);
  2728. }
  2729. /*
  2730. * The background pageout daemon, started as a kernel thread
  2731. * from the init process.
  2732. *
  2733. * This basically trickles out pages so that we have _some_
  2734. * free memory available even if there is no other activity
  2735. * that frees anything up. This is needed for things like routing
  2736. * etc, where we otherwise might have all activity going on in
  2737. * asynchronous contexts that cannot page things out.
  2738. *
  2739. * If there are applications that are active memory-allocators
  2740. * (most normal use), this basically shouldn't matter.
  2741. */
  2742. static int kswapd(void *p)
  2743. {
  2744. unsigned long order, new_order;
  2745. unsigned balanced_order;
  2746. int classzone_idx, new_classzone_idx;
  2747. int balanced_classzone_idx;
  2748. pg_data_t *pgdat = (pg_data_t*)p;
  2749. struct task_struct *tsk = current;
  2750. struct reclaim_state reclaim_state = {
  2751. .reclaimed_slab = 0,
  2752. };
  2753. const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
  2754. lockdep_set_current_reclaim_state(GFP_KERNEL);
  2755. if (!cpumask_empty(cpumask))
  2756. set_cpus_allowed_ptr(tsk, cpumask);
  2757. current->reclaim_state = &reclaim_state;
  2758. /*
  2759. * Tell the memory management that we're a "memory allocator",
  2760. * and that if we need more memory we should get access to it
  2761. * regardless (see "__alloc_pages()"). "kswapd" should
  2762. * never get caught in the normal page freeing logic.
  2763. *
  2764. * (Kswapd normally doesn't need memory anyway, but sometimes
  2765. * you need a small amount of memory in order to be able to
  2766. * page out something else, and this flag essentially protects
  2767. * us from recursively trying to free more memory as we're
  2768. * trying to free the first piece of memory in the first place).
  2769. */
  2770. tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
  2771. set_freezable();
  2772. order = new_order = 0;
  2773. balanced_order = 0;
  2774. classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
  2775. balanced_classzone_idx = classzone_idx;
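/*
 * Bookkeeping for the loop below: (order, classzone_idx) is the request
 * kswapd is currently working on, (new_order, new_classzone_idx) is the
 * latest wakeup request read from the pgdat, and (balanced_order,
 * balanced_classzone_idx) is what balance_pgdat() last achieved.  kswapd
 * only tries to sleep when the pending request is no harder than what was
 * just balanced.
 */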
  2776. for ( ; ; ) {
  2777. bool ret;
  2778. /*
  2779. * If the last balance_pgdat was unsuccessful it's unlikely a
  2780. * new request of a similar or harder type will succeed soon
2781. * so consider going to sleep based on the order and classzone we last reclaimed at.
  2782. */
  2783. if (balanced_classzone_idx >= new_classzone_idx &&
  2784. balanced_order == new_order) {
  2785. new_order = pgdat->kswapd_max_order;
  2786. new_classzone_idx = pgdat->classzone_idx;
  2787. pgdat->kswapd_max_order = 0;
  2788. pgdat->classzone_idx = pgdat->nr_zones - 1;
  2789. }
  2790. if (order < new_order || classzone_idx > new_classzone_idx) {
  2791. /*
  2792. * Don't sleep if someone wants a larger 'order'
2793. * allocation or has tighter zone constraints.
  2794. */
  2795. order = new_order;
  2796. classzone_idx = new_classzone_idx;
  2797. } else {
  2798. kswapd_try_to_sleep(pgdat, balanced_order,
  2799. balanced_classzone_idx);
  2800. order = pgdat->kswapd_max_order;
  2801. classzone_idx = pgdat->classzone_idx;
  2802. new_order = order;
  2803. new_classzone_idx = classzone_idx;
  2804. pgdat->kswapd_max_order = 0;
  2805. pgdat->classzone_idx = pgdat->nr_zones - 1;
  2806. }
  2807. ret = try_to_freeze();
  2808. if (kthread_should_stop())
  2809. break;
  2810. /*
  2811. * We can speed up thawing tasks if we don't call balance_pgdat
  2812. * after returning from the refrigerator
  2813. */
  2814. if (!ret) {
  2815. trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
  2816. balanced_classzone_idx = classzone_idx;
  2817. balanced_order = balance_pgdat(pgdat, order,
  2818. &balanced_classzone_idx);
  2819. }
  2820. }
  2821. current->reclaim_state = NULL;
  2822. return 0;
  2823. }
  2824. /*
  2825. * A zone is low on free memory, so wake its kswapd task to service it.
  2826. */
  2827. void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
  2828. {
  2829. pg_data_t *pgdat;
  2830. if (!populated_zone(zone))
  2831. return;
  2832. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  2833. return;
  2834. pgdat = zone->zone_pgdat;
  2835. if (pgdat->kswapd_max_order < order) {
  2836. pgdat->kswapd_max_order = order;
  2837. pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
  2838. }
  2839. if (!waitqueue_active(&pgdat->kswapd_wait))
  2840. return;
  2841. if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
  2842. return;
  2843. trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
  2844. wake_up_interruptible(&pgdat->kswapd_wait);
  2845. }
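/*
 * Typical caller (a sketch under assumed names, not the exact call site):
 * the page allocator slow path wakes kswapd on every eligible zone before
 * falling back to direct reclaim, roughly:
 *
 *   for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
 *       wakeup_kswapd(zone, order, zone_idx(preferred_zone));
 */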
  2846. /*
2847. * The reclaimable count is only an approximation.
2848. * Pages that are harder to reclaim include:
2849. * - mlocked pages, which will be moved to the unevictable list when encountered
2850. * - mapped pages, which may require several passes to be reclaimed
2851. * - dirty pages, which are not "instantly" reclaimable
  2852. */
  2853. unsigned long global_reclaimable_pages(void)
  2854. {
  2855. int nr;
  2856. nr = global_page_state(NR_ACTIVE_FILE) +
  2857. global_page_state(NR_INACTIVE_FILE);
  2858. if (get_nr_swap_pages() > 0)
  2859. nr += global_page_state(NR_ACTIVE_ANON) +
  2860. global_page_state(NR_INACTIVE_ANON);
  2861. return nr;
  2862. }
  2863. unsigned long zone_reclaimable_pages(struct zone *zone)
  2864. {
  2865. int nr;
  2866. nr = zone_page_state(zone, NR_ACTIVE_FILE) +
  2867. zone_page_state(zone, NR_INACTIVE_FILE);
  2868. if (get_nr_swap_pages() > 0)
  2869. nr += zone_page_state(zone, NR_ACTIVE_ANON) +
  2870. zone_page_state(zone, NR_INACTIVE_ANON);
  2871. return nr;
  2872. }
  2873. #ifdef CONFIG_HIBERNATION
  2874. /*
2875. * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the number of
  2876. * freed pages.
  2877. *
  2878. * Rather than trying to age LRUs the aim is to preserve the overall
  2879. * LRU order by reclaiming preferentially
  2880. * inactive > active > active referenced > active mapped
  2881. */
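/*
 * The expected caller is the hibernation code in kernel/power/snapshot.c,
 * which frees memory here before building the suspend image.
 */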
  2882. unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
  2883. {
  2884. struct reclaim_state reclaim_state;
  2885. struct scan_control sc = {
  2886. .gfp_mask = GFP_HIGHUSER_MOVABLE,
  2887. .may_swap = 1,
  2888. .may_unmap = 1,
  2889. .may_writepage = 1,
  2890. .nr_to_reclaim = nr_to_reclaim,
  2891. .hibernation_mode = 1,
  2892. .order = 0,
  2893. .priority = DEF_PRIORITY,
  2894. };
  2895. struct shrink_control shrink = {
  2896. .gfp_mask = sc.gfp_mask,
  2897. };
  2898. struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
  2899. struct task_struct *p = current;
  2900. unsigned long nr_reclaimed;
  2901. p->flags |= PF_MEMALLOC;
  2902. lockdep_set_current_reclaim_state(sc.gfp_mask);
  2903. reclaim_state.reclaimed_slab = 0;
  2904. p->reclaim_state = &reclaim_state;
  2905. nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
  2906. p->reclaim_state = NULL;
  2907. lockdep_clear_current_reclaim_state();
  2908. p->flags &= ~PF_MEMALLOC;
  2909. return nr_reclaimed;
  2910. }
  2911. #endif /* CONFIG_HIBERNATION */
  2912. /* It's optimal to keep kswapds on the same CPUs as their memory, but
2913. not required for correctness. So if the last CPU in a node goes
2914. offline, kswapd is allowed to run anywhere; when the first CPU of that
2915. node comes back online, its CPU bindings are restored. */
  2916. static int cpu_callback(struct notifier_block *nfb, unsigned long action,
  2917. void *hcpu)
  2918. {
  2919. int nid;
  2920. if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
  2921. for_each_node_state(nid, N_MEMORY) {
  2922. pg_data_t *pgdat = NODE_DATA(nid);
  2923. const struct cpumask *mask;
  2924. mask = cpumask_of_node(pgdat->node_id);
  2925. if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
  2926. /* One of our CPUs online: restore mask */
  2927. set_cpus_allowed_ptr(pgdat->kswapd, mask);
  2928. }
  2929. }
  2930. return NOTIFY_OK;
  2931. }
  2932. /*
  2933. * This kswapd start function will be called by init and node-hot-add.
2934. * On node hot-add, kswapd will be moved to the proper CPUs if CPUs are hot-added.
  2935. */
  2936. int kswapd_run(int nid)
  2937. {
  2938. pg_data_t *pgdat = NODE_DATA(nid);
  2939. int ret = 0;
  2940. if (pgdat->kswapd)
  2941. return 0;
  2942. pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
  2943. if (IS_ERR(pgdat->kswapd)) {
  2944. /* failure at boot is fatal */
  2945. BUG_ON(system_state == SYSTEM_BOOTING);
  2946. pr_err("Failed to start kswapd on node %d\n", nid);
  2947. ret = PTR_ERR(pgdat->kswapd);
  2948. pgdat->kswapd = NULL;
  2949. }
  2950. return ret;
  2951. }
  2952. /*
  2953. * Called by memory hotplug when all memory in a node is offlined. Caller must
  2954. * hold lock_memory_hotplug().
  2955. */
  2956. void kswapd_stop(int nid)
  2957. {
  2958. struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
  2959. if (kswapd) {
  2960. kthread_stop(kswapd);
  2961. NODE_DATA(nid)->kswapd = NULL;
  2962. }
  2963. }
  2964. static int __init kswapd_init(void)
  2965. {
  2966. int nid;
  2967. swap_setup();
  2968. for_each_node_state(nid, N_MEMORY)
  2969. kswapd_run(nid);
  2970. hotcpu_notifier(cpu_callback, 0);
  2971. return 0;
  2972. }
  2973. module_init(kswapd_init)
  2974. #ifdef CONFIG_NUMA
  2975. /*
  2976. * Zone reclaim mode
  2977. *
  2978. * If non-zero call zone_reclaim when the number of free pages falls below
  2979. * the watermarks.
  2980. */
  2981. int zone_reclaim_mode __read_mostly;
  2982. #define RECLAIM_OFF 0
  2983. #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
  2984. #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
  2985. #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
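/*
 * zone_reclaim_mode is a bitmask written via /proc/sys/vm/zone_reclaim_mode.
 * For example, writing 3 selects RECLAIM_ZONE | RECLAIM_WRITE: reclaim page
 * cache from the local zone and allow dirty pages to be written out while
 * doing so.
 */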
  2986. /*
2987. * Priority for ZONE_RECLAIM. This determines the fraction of a zone's
2988. * pages scanned in each zone_reclaim pass; a priority of 4 scans 1/16th
2989. * of the zone.
  2990. */
  2991. #define ZONE_RECLAIM_PRIORITY 4
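/*
 * Worked example: 1/16th of a zone containing 1,048,576 pages (4GB with 4KB
 * pages) is 65,536 pages considered per zone_reclaim pass.
 */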
  2992. /*
  2993. * Percentage of pages in a zone that must be unmapped for zone_reclaim to
  2994. * occur.
  2995. */
  2996. int sysctl_min_unmapped_ratio = 1;
  2997. /*
  2998. * If the number of slab pages in a zone grows beyond this percentage then
  2999. * slab reclaim needs to occur.
  3000. */
  3001. int sysctl_min_slab_ratio = 5;
  3002. static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
  3003. {
  3004. unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
  3005. unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
  3006. zone_page_state(zone, NR_ACTIVE_FILE);
  3007. /*
  3008. * It's possible for there to be more file mapped pages than
  3009. * accounted for by the pages on the file LRU lists because
  3010. * tmpfs pages accounted for as ANON can also be FILE_MAPPED
  3011. */
  3012. return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
  3013. }
  3014. /* Work out how many page cache pages we can reclaim in this reclaim_mode */
  3015. static long zone_pagecache_reclaimable(struct zone *zone)
  3016. {
  3017. long nr_pagecache_reclaimable;
  3018. long delta = 0;
  3019. /*
  3020. * If RECLAIM_SWAP is set, then all file pages are considered
  3021. * potentially reclaimable. Otherwise, we have to worry about
  3022. * pages like swapcache and zone_unmapped_file_pages() provides
  3023. * a better estimate
  3024. */
  3025. if (zone_reclaim_mode & RECLAIM_SWAP)
  3026. nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
  3027. else
  3028. nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
  3029. /* If we can't clean pages, remove dirty pages from consideration */
  3030. if (!(zone_reclaim_mode & RECLAIM_WRITE))
  3031. delta += zone_page_state(zone, NR_FILE_DIRTY);
  3032. /* Watch for any possible underflows due to delta */
  3033. if (unlikely(delta > nr_pagecache_reclaimable))
  3034. delta = nr_pagecache_reclaimable;
  3035. return nr_pagecache_reclaimable - delta;
  3036. }
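/*
 * Worked example (hypothetical numbers): with RECLAIM_SWAP and RECLAIM_WRITE
 * both clear, a zone with 10,000 file LRU pages, 4,000 file-mapped pages and
 * 1,000 dirty file pages reports 10,000 - 4,000 - 1,000 = 5,000 reclaimable
 * page cache pages.
 */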
  3037. /*
  3038. * Try to free up some pages from this zone through reclaim.
  3039. */
  3040. static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
  3041. {
  3042. /* Minimum pages needed in order to stay on node */
  3043. const unsigned long nr_pages = 1 << order;
  3044. struct task_struct *p = current;
  3045. struct reclaim_state reclaim_state;
  3046. struct scan_control sc = {
  3047. .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
  3048. .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
  3049. .may_swap = 1,
  3050. .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
  3051. .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
  3052. .order = order,
  3053. .priority = ZONE_RECLAIM_PRIORITY,
  3054. };
  3055. struct shrink_control shrink = {
  3056. .gfp_mask = sc.gfp_mask,
  3057. };
  3058. unsigned long nr_slab_pages0, nr_slab_pages1;
  3059. cond_resched();
  3060. /*
  3061. * We need to be able to allocate from the reserves for RECLAIM_SWAP
  3062. * and we also need to be able to write out pages for RECLAIM_WRITE
  3063. * and RECLAIM_SWAP.
  3064. */
  3065. p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
  3066. lockdep_set_current_reclaim_state(gfp_mask);
  3067. reclaim_state.reclaimed_slab = 0;
  3068. p->reclaim_state = &reclaim_state;
  3069. if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
  3070. /*
  3071. * Free memory by calling shrink zone with increasing
  3072. * priorities until we have enough memory freed.
  3073. */
  3074. do {
  3075. shrink_zone(zone, &sc);
  3076. } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
  3077. }
  3078. nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
  3079. if (nr_slab_pages0 > zone->min_slab_pages) {
  3080. /*
  3081. * shrink_slab() does not currently allow us to determine how
  3082. * many pages were freed in this zone. So we take the current
  3083. * number of slab pages and shake the slab until it is reduced
  3084. * by the same nr_pages that we used for reclaiming unmapped
  3085. * pages.
  3086. *
  3087. * Note that shrink_slab will free memory on all zones and may
  3088. * take a long time.
  3089. */
  3090. for (;;) {
  3091. unsigned long lru_pages = zone_reclaimable_pages(zone);
  3092. /* No reclaimable slab or very low memory pressure */
  3093. if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
  3094. break;
  3095. /* Freed enough memory */
  3096. nr_slab_pages1 = zone_page_state(zone,
  3097. NR_SLAB_RECLAIMABLE);
  3098. if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
  3099. break;
  3100. }
  3101. /*
  3102. * Update nr_reclaimed by the number of slab pages we
  3103. * reclaimed from this zone.
  3104. */
  3105. nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
  3106. if (nr_slab_pages1 < nr_slab_pages0)
  3107. sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
  3108. }
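/*
 * Illustrative accounting for the slab pass above (hypothetical numbers): if
 * the zone started with nr_slab_pages0 = 5,000 reclaimable slab pages and an
 * order-2 request needs nr_pages = 4, shrink_slab() is retried until at most
 * 4,996 slab pages remain, and the pages actually freed are credited to
 * sc.nr_reclaimed.
 */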
  3109. p->reclaim_state = NULL;
  3110. current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
  3111. lockdep_clear_current_reclaim_state();
  3112. return sc.nr_reclaimed >= nr_pages;
  3113. }
  3114. int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
  3115. {
  3116. int node_id;
  3117. int ret;
  3118. /*
  3119. * Zone reclaim reclaims unmapped file backed pages and
  3120. * slab pages if we are over the defined limits.
  3121. *
  3122. * A small portion of unmapped file backed pages is needed for
  3123. * file I/O otherwise pages read by file I/O will be immediately
  3124. * thrown out if the zone is overallocated. So we do not reclaim
  3125. * if less than a specified percentage of the zone is used by
  3126. * unmapped file backed pages.
  3127. */
  3128. if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
  3129. zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
  3130. return ZONE_RECLAIM_FULL;
  3131. if (zone->all_unreclaimable)
  3132. return ZONE_RECLAIM_FULL;
  3133. /*
  3134. * Do not scan if the allocation should not be delayed.
  3135. */
  3136. if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
  3137. return ZONE_RECLAIM_NOSCAN;
  3138. /*
  3139. * Only run zone reclaim on the local zone or on zones that do not
  3140. * have associated processors. This will favor the local processor
3141. * over remote processors and spread off-node memory allocations
3142. * as widely as possible.
  3143. */
  3144. node_id = zone_to_nid(zone);
  3145. if (node_state(node_id, N_CPU) && node_id != numa_node_id())
  3146. return ZONE_RECLAIM_NOSCAN;
  3147. if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
  3148. return ZONE_RECLAIM_NOSCAN;
  3149. ret = __zone_reclaim(zone, gfp_mask, order);
  3150. zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
  3151. if (!ret)
  3152. count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
  3153. return ret;
  3154. }
  3155. #endif
  3156. /*
  3157. * page_evictable - test whether a page is evictable
  3158. * @page: the page to test
  3159. *
3160. * Test whether the page is evictable, i.e. whether it should be placed on the
3161. * active/inactive lists rather than on the unevictable list.
  3162. *
  3163. * Reasons page might not be evictable:
  3164. * (1) page's mapping marked unevictable
  3165. * (2) page is part of an mlocked VMA
  3166. *
  3167. */
  3168. int page_evictable(struct page *page)
  3169. {
  3170. return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
  3171. }
  3172. #ifdef CONFIG_SHMEM
  3173. /**
  3174. * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
  3175. * @pages: array of pages to check
  3176. * @nr_pages: number of pages to check
  3177. *
  3178. * Checks pages for evictability and moves them to the appropriate lru list.
  3179. *
  3180. * This function is only used for SysV IPC SHM_UNLOCK.
  3181. */
  3182. void check_move_unevictable_pages(struct page **pages, int nr_pages)
  3183. {
  3184. struct lruvec *lruvec;
  3185. struct zone *zone = NULL;
  3186. int pgscanned = 0;
  3187. int pgrescued = 0;
  3188. int i;
  3189. for (i = 0; i < nr_pages; i++) {
  3190. struct page *page = pages[i];
  3191. struct zone *pagezone;
  3192. pgscanned++;
  3193. pagezone = page_zone(page);
  3194. if (pagezone != zone) {
  3195. if (zone)
  3196. spin_unlock_irq(&zone->lru_lock);
  3197. zone = pagezone;
  3198. spin_lock_irq(&zone->lru_lock);
  3199. }
  3200. lruvec = mem_cgroup_page_lruvec(page, zone);
  3201. if (!PageLRU(page) || !PageUnevictable(page))
  3202. continue;
  3203. if (page_evictable(page)) {
  3204. enum lru_list lru = page_lru_base_type(page);
  3205. VM_BUG_ON(PageActive(page));
  3206. ClearPageUnevictable(page);
  3207. del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
  3208. add_page_to_lru_list(page, lruvec, lru);
  3209. pgrescued++;
  3210. }
  3211. }
  3212. if (zone) {
  3213. __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
  3214. __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
  3215. spin_unlock_irq(&zone->lru_lock);
  3216. }
  3217. }
  3218. #endif /* CONFIG_SHMEM */
  3219. static void warn_scan_unevictable_pages(void)
  3220. {
  3221. printk_once(KERN_WARNING
  3222. "%s: The scan_unevictable_pages sysctl/node-interface has been "
  3223. "disabled for lack of a legitimate use case. If you have "
  3224. "one, please send an email to linux-mm@kvack.org.\n",
  3225. current->comm);
  3226. }
  3227. /*
  3228. * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
  3229. * all nodes' unevictable lists for evictable pages
  3230. */
  3231. unsigned long scan_unevictable_pages;
  3232. int scan_unevictable_handler(struct ctl_table *table, int write,
  3233. void __user *buffer,
  3234. size_t *length, loff_t *ppos)
  3235. {
  3236. warn_scan_unevictable_pages();
  3237. proc_doulongvec_minmax(table, write, buffer, length, ppos);
  3238. scan_unevictable_pages = 0;
  3239. return 0;
  3240. }
  3241. #ifdef CONFIG_NUMA
  3242. /*
  3243. * per node 'scan_unevictable_pages' attribute. On demand re-scan of
  3244. * a specified node's per zone unevictable lists for evictable pages.
  3245. */
  3246. static ssize_t read_scan_unevictable_node(struct device *dev,
  3247. struct device_attribute *attr,
  3248. char *buf)
  3249. {
  3250. warn_scan_unevictable_pages();
  3251. return sprintf(buf, "0\n"); /* always zero; should fit... */
  3252. }
  3253. static ssize_t write_scan_unevictable_node(struct device *dev,
  3254. struct device_attribute *attr,
  3255. const char *buf, size_t count)
  3256. {
  3257. warn_scan_unevictable_pages();
  3258. return 1;
  3259. }
  3260. static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
  3261. read_scan_unevictable_node,
  3262. write_scan_unevictable_node);
  3263. int scan_unevictable_register_node(struct node *node)
  3264. {
  3265. return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
  3266. }
  3267. void scan_unevictable_unregister_node(struct node *node)
  3268. {
  3269. device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
  3270. }
  3271. #endif