vmscan.c
  1. /*
  2. * linux/mm/vmscan.c
  3. *
  4. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  5. *
  6. * Swap reorganised 29.12.95, Stephen Tweedie.
  7. * kswapd added: 7.1.96 sct
  8. * Removed kswapd_ctl limits, and swap out as many pages as needed
  9. * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
  10. * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
  11. * Multiqueue VM started 5.8.00, Rik van Riel.
  12. */
  13. #include <linux/mm.h>
  14. #include <linux/module.h>
  15. #include <linux/gfp.h>
  16. #include <linux/kernel_stat.h>
  17. #include <linux/swap.h>
  18. #include <linux/pagemap.h>
  19. #include <linux/init.h>
  20. #include <linux/highmem.h>
  21. #include <linux/vmpressure.h>
  22. #include <linux/vmstat.h>
  23. #include <linux/file.h>
  24. #include <linux/writeback.h>
  25. #include <linux/blkdev.h>
  26. #include <linux/buffer_head.h> /* for try_to_release_page(),
  27. buffer_heads_over_limit */
  28. #include <linux/mm_inline.h>
  29. #include <linux/backing-dev.h>
  30. #include <linux/rmap.h>
  31. #include <linux/topology.h>
  32. #include <linux/cpu.h>
  33. #include <linux/cpuset.h>
  34. #include <linux/compaction.h>
  35. #include <linux/notifier.h>
  36. #include <linux/rwsem.h>
  37. #include <linux/delay.h>
  38. #include <linux/kthread.h>
  39. #include <linux/freezer.h>
  40. #include <linux/memcontrol.h>
  41. #include <linux/delayacct.h>
  42. #include <linux/sysctl.h>
  43. #include <linux/oom.h>
  44. #include <linux/prefetch.h>
  45. #include <asm/tlbflush.h>
  46. #include <asm/div64.h>
  47. #include <linux/swapops.h>
  48. #include "internal.h"
  49. #define CREATE_TRACE_POINTS
  50. #include <trace/events/vmscan.h>
  51. struct scan_control {
  52. /* Incremented by the number of inactive pages that were scanned */
  53. unsigned long nr_scanned;
  54. /* Number of pages freed so far during a call to shrink_zones() */
  55. unsigned long nr_reclaimed;
  56. /* How many pages shrink_list() should reclaim */
  57. unsigned long nr_to_reclaim;
  58. unsigned long hibernation_mode;
  59. /* This context's GFP mask */
  60. gfp_t gfp_mask;
  61. int may_writepage;
  62. /* Can mapped pages be reclaimed? */
  63. int may_unmap;
  64. /* Can pages be swapped as part of reclaim? */
  65. int may_swap;
  66. int order;
  67. /* Scan (total_size >> priority) pages at once */
  68. int priority;
  69. /*
  70. * The memory cgroup that hit its limit and as a result is the
  71. * primary target of this reclaim invocation.
  72. */
  73. struct mem_cgroup *target_mem_cgroup;
  74. /*
  75. * Nodemask of nodes allowed by the caller. If NULL, all nodes
  76. * are scanned.
  77. */
  78. nodemask_t *nodemask;
  79. };
  80. #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
  81. #ifdef ARCH_HAS_PREFETCH
  82. #define prefetch_prev_lru_page(_page, _base, _field) \
  83. do { \
  84. if ((_page)->lru.prev != _base) { \
  85. struct page *prev; \
  86. \
  87. prev = lru_to_page(&(_page->lru)); \
  88. prefetch(&prev->_field); \
  89. } \
  90. } while (0)
  91. #else
  92. #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
  93. #endif
  94. #ifdef ARCH_HAS_PREFETCHW
  95. #define prefetchw_prev_lru_page(_page, _base, _field) \
  96. do { \
  97. if ((_page)->lru.prev != _base) { \
  98. struct page *prev; \
  99. \
  100. prev = lru_to_page(&(_page->lru)); \
  101. prefetchw(&prev->_field); \
  102. } \
  103. } while (0)
  104. #else
  105. #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
  106. #endif
  107. /*
  108. * From 0 .. 100. Higher means more swappy.
  109. */
  110. int vm_swappiness = 60;
  111. unsigned long vm_total_pages; /* The total number of pages which the VM controls */
  112. static LIST_HEAD(shrinker_list);
  113. static DECLARE_RWSEM(shrinker_rwsem);
  114. #ifdef CONFIG_MEMCG
  115. static bool global_reclaim(struct scan_control *sc)
  116. {
  117. return !sc->target_mem_cgroup;
  118. }
  119. #else
  120. static bool global_reclaim(struct scan_control *sc)
  121. {
  122. return true;
  123. }
  124. #endif
  125. unsigned long zone_reclaimable_pages(struct zone *zone)
  126. {
  127. int nr;
  128. nr = zone_page_state(zone, NR_ACTIVE_FILE) +
  129. zone_page_state(zone, NR_INACTIVE_FILE);
  130. if (get_nr_swap_pages() > 0)
  131. nr += zone_page_state(zone, NR_ACTIVE_ANON) +
  132. zone_page_state(zone, NR_INACTIVE_ANON);
  133. return nr;
  134. }
  135. bool zone_reclaimable(struct zone *zone)
  136. {
  137. return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
  138. }
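/*
 * For example (illustrative numbers, not from the source): a zone with
 * 1,000,000 reclaimable pages keeps being treated as reclaimable until its
 * pages_scanned counter exceeds 6,000,000, i.e. until it has been scanned
 * roughly six times over.
 */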
  139. static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
  140. {
  141. if (!mem_cgroup_disabled())
  142. return mem_cgroup_get_lru_size(lruvec, lru);
  143. return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
  144. }
  145. /*
  146. * Add a shrinker callback to be called from the vm.
  147. */
  148. int register_shrinker(struct shrinker *shrinker)
  149. {
  150. size_t size = sizeof(*shrinker->nr_deferred);
  151. /*
  152. * If we only have one possible node in the system anyway, save
  153. * ourselves the trouble and disable NUMA aware behavior. This way we
  154. * will save memory and some small loop time later.
  155. */
  156. if (nr_node_ids == 1)
  157. shrinker->flags &= ~SHRINKER_NUMA_AWARE;
  158. if (shrinker->flags & SHRINKER_NUMA_AWARE)
  159. size *= nr_node_ids;
  160. shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
  161. if (!shrinker->nr_deferred)
  162. return -ENOMEM;
  163. down_write(&shrinker_rwsem);
  164. list_add_tail(&shrinker->list, &shrinker_list);
  165. up_write(&shrinker_rwsem);
  166. return 0;
  167. }
  168. EXPORT_SYMBOL(register_shrinker);
  169. /*
  170. * Remove one shrinker from the list.
  171. */
  172. void unregister_shrinker(struct shrinker *shrinker)
  173. {
  174. down_write(&shrinker_rwsem);
  175. list_del(&shrinker->list);
  176. up_write(&shrinker_rwsem);
  177. }
  178. EXPORT_SYMBOL(unregister_shrinker);
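/*
 * Illustrative usage sketch (not part of vmscan.c): how a cache owner
 * typically plugs into the register/unregister interface above.  All
 * "my_*" names below are hypothetical; only register_shrinker(),
 * unregister_shrinker(), DEFAULT_SEEKS, SHRINK_STOP and the
 * count_objects/scan_objects callbacks are assumed from the kernel API.
 *
 *	static unsigned long my_cache_count(struct shrinker *s,
 *					    struct shrink_control *sc)
 *	{
 *		return my_cache_nr_objects;	-- freeable objects, may be 0
 *	}
 *
 *	static unsigned long my_cache_scan(struct shrinker *s,
 *					    struct shrink_control *sc)
 *	{
 *		unsigned long freed = 0;
 *
 *		if (!spin_trylock(&my_cache_lock))
 *			return SHRINK_STOP;	-- tell vmscan to back off
 *		while (freed < sc->nr_to_scan && my_cache_nr_objects)
 *			freed += my_cache_evict_one();	-- hypothetical helper
 *		spin_unlock(&my_cache_lock);
 *		return freed;			-- objects actually freed
 *	}
 *
 *	static struct shrinker my_shrinker = {
 *		.count_objects	= my_cache_count,
 *		.scan_objects	= my_cache_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 * register_shrinker(&my_shrinker) is then called at init time and
 * unregister_shrinker(&my_shrinker) at teardown.
 */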
  179. #define SHRINK_BATCH 128
  180. static unsigned long
  181. shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
  182. unsigned long nr_pages_scanned, unsigned long lru_pages)
  183. {
  184. unsigned long freed = 0;
  185. unsigned long long delta;
  186. long total_scan;
  187. long max_pass;
  188. long nr;
  189. long new_nr;
  190. int nid = shrinkctl->nid;
  191. long batch_size = shrinker->batch ? shrinker->batch
  192. : SHRINK_BATCH;
  193. max_pass = shrinker->count_objects(shrinker, shrinkctl);
  194. if (max_pass == 0)
  195. return 0;
  196. /*
  197. * copy the current shrinker scan count into a local variable
  198. * and zero it so that other concurrent shrinker invocations
  199. * don't also do this scanning work.
  200. */
  201. nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
  202. total_scan = nr;
  203. delta = (4 * nr_pages_scanned) / shrinker->seeks;
  204. delta *= max_pass;
  205. do_div(delta, lru_pages + 1);
  206. total_scan += delta;
  207. if (total_scan < 0) {
  208. printk(KERN_ERR
  209. "shrink_slab: %pF negative objects to delete nr=%ld\n",
  210. shrinker->scan_objects, total_scan);
  211. total_scan = max_pass;
  212. }
  213. /*
  214. * We need to avoid excessive windup on filesystem shrinkers
  215. * due to large numbers of GFP_NOFS allocations causing the
  216. * shrinkers to return -1 all the time. This results in a large
  217. * nr being built up so when a shrink that can do some work
  218. * comes along it empties the entire cache due to nr >>>
  219. * max_pass. This is bad for sustaining a working set in
  220. * memory.
  221. *
  222. * Hence only allow the shrinker to scan the entire cache when
  223. * a large delta change is calculated directly.
  224. */
  225. if (delta < max_pass / 4)
  226. total_scan = min(total_scan, max_pass / 2);
  227. /*
  228. * Avoid risking looping forever due to a too-large nr value:
  229. * never try to free more than twice the estimated number of
  230. * freeable entries.
  231. */
  232. if (total_scan > max_pass * 2)
  233. total_scan = max_pass * 2;
  234. trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
  235. nr_pages_scanned, lru_pages,
  236. max_pass, delta, total_scan);
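/*
 * Worked example (illustrative numbers, not from the source): with
 * nr_pages_scanned = 1024, lru_pages = 100000, shrinker->seeks = 2 and
 * max_pass = 10000 freeable objects,
 *
 *	delta  = (4 * 1024) / 2    =     2048
 *	delta *= 10000             = 20480000
 *	delta /= (100000 + 1)      ~=     204
 *
 * i.e. the cache is asked to scan (4/seeks) times the fraction of the LRU
 * that was scanned (about 1% here, so ~2% of the 10000 objects), plus
 * whatever scan count was deferred from earlier calls (nr).
 */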
  237. while (total_scan >= batch_size) {
  238. unsigned long ret;
  239. shrinkctl->nr_to_scan = batch_size;
  240. ret = shrinker->scan_objects(shrinker, shrinkctl);
  241. if (ret == SHRINK_STOP)
  242. break;
  243. freed += ret;
  244. count_vm_events(SLABS_SCANNED, batch_size);
  245. total_scan -= batch_size;
  246. cond_resched();
  247. }
  248. /*
  249. * move the unused scan count back into the shrinker in a
  250. * manner that handles concurrent updates. If we exhausted the
  251. * scan, there is no need to do an update.
  252. */
  253. if (total_scan > 0)
  254. new_nr = atomic_long_add_return(total_scan,
  255. &shrinker->nr_deferred[nid]);
  256. else
  257. new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
  258. trace_mm_shrink_slab_end(shrinker, freed, nr, new_nr);
  259. return freed;
  260. }
  261. /*
  262. * Call the shrink functions to age shrinkable caches
  263. *
  264. * Here we assume it costs one seek to replace a lru page and that it also
  265. * takes a seek to recreate a cache object. With this in mind we age equal
  266. * percentages of the lru and ageable caches. This should balance the seeks
  267. * generated by these structures.
  268. *
  269. * If the vm encountered mapped pages on the LRU it increases the pressure on
  270. * slab to avoid swapping.
  271. *
  272. * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
  273. *
  274. * `lru_pages' represents the number of on-LRU pages in all the zones which
  275. * are eligible for the caller's allocation attempt. It is used for balancing
  276. * slab reclaim versus page reclaim.
  277. *
  278. * Returns the number of slab objects which we shrunk.
  279. */
  280. unsigned long shrink_slab(struct shrink_control *shrinkctl,
  281. unsigned long nr_pages_scanned,
  282. unsigned long lru_pages)
  283. {
  284. struct shrinker *shrinker;
  285. unsigned long freed = 0;
  286. if (nr_pages_scanned == 0)
  287. nr_pages_scanned = SWAP_CLUSTER_MAX;
  288. if (!down_read_trylock(&shrinker_rwsem)) {
  289. /*
  290. * If we would return 0, our callers would understand that we
  291. * have nothing else to shrink and give up trying. By returning
  292. * 1 we keep it going and assume we'll be able to shrink next
  293. * time.
  294. */
  295. freed = 1;
  296. goto out;
  297. }
  298. list_for_each_entry(shrinker, &shrinker_list, list) {
  299. for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {
  300. if (!node_online(shrinkctl->nid))
  301. continue;
  302. if (!(shrinker->flags & SHRINKER_NUMA_AWARE) &&
  303. (shrinkctl->nid != 0))
  304. break;
  305. freed += shrink_slab_node(shrinkctl, shrinker,
  306. nr_pages_scanned, lru_pages);
  307. }
  308. }
  309. up_read(&shrinker_rwsem);
  310. out:
  311. cond_resched();
  312. return freed;
  313. }
  314. static inline int is_page_cache_freeable(struct page *page)
  315. {
  316. /*
  317. * A freeable page cache page is referenced only by the caller
  318. * that isolated the page, the page cache radix tree and
  319. * optional buffer heads at page->private.
  320. */
  321. return page_count(page) - page_has_private(page) == 2;
  322. }
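/*
 * For example, an isolated pagecache page that still has buffer heads
 * attached has page_count() == 3 (isolating caller + radix tree +
 * page->private reference) and page_has_private() == 1, so the test
 * above still yields 3 - 1 == 2.
 */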
  323. static int may_write_to_queue(struct backing_dev_info *bdi,
  324. struct scan_control *sc)
  325. {
  326. if (current->flags & PF_SWAPWRITE)
  327. return 1;
  328. if (!bdi_write_congested(bdi))
  329. return 1;
  330. if (bdi == current->backing_dev_info)
  331. return 1;
  332. return 0;
  333. }
  334. /*
  335. * We detected a synchronous write error writing a page out. Probably
  336. * -ENOSPC. We need to propagate that into the address_space for a subsequent
  337. * fsync(), msync() or close().
  338. *
  339. * The tricky part is that after writepage we cannot touch the mapping: nothing
  340. * prevents it from being freed up. But we have a ref on the page and once
  341. * that page is locked, the mapping is pinned.
  342. *
  343. * We're allowed to run sleeping lock_page() here because we know the caller has
  344. * __GFP_FS.
  345. */
  346. static void handle_write_error(struct address_space *mapping,
  347. struct page *page, int error)
  348. {
  349. lock_page(page);
  350. if (page_mapping(page) == mapping)
  351. mapping_set_error(mapping, error);
  352. unlock_page(page);
  353. }
  354. /* possible outcome of pageout() */
  355. typedef enum {
  356. /* failed to write page out, page is locked */
  357. PAGE_KEEP,
  358. /* move page to the active list, page is locked */
  359. PAGE_ACTIVATE,
  360. /* page has been sent to the disk successfully, page is unlocked */
  361. PAGE_SUCCESS,
  362. /* page is clean and locked */
  363. PAGE_CLEAN,
  364. } pageout_t;
  365. /*
  366. * pageout is called by shrink_page_list() for each dirty page.
  367. * Calls ->writepage().
  368. */
  369. static pageout_t pageout(struct page *page, struct address_space *mapping,
  370. struct scan_control *sc)
  371. {
  372. /*
  373. * If the page is dirty, only perform writeback if that write
  374. * will be non-blocking, to prevent this allocation from being
  375. * stalled by pagecache activity. But note that there may be
  376. * stalls if we need to run get_block(). We could test
  377. * PagePrivate for that.
  378. *
  379. * If this process is currently in __generic_file_aio_write() against
  380. * this page's queue, we can perform writeback even if that
  381. * will block.
  382. *
  383. * If the page is swapcache, write it back even if that would
  384. * block, for some throttling. This happens by accident, because
  385. * swap_backing_dev_info is bust: it doesn't reflect the
  386. * congestion state of the swapdevs. Easy to fix, if needed.
  387. */
  388. if (!is_page_cache_freeable(page))
  389. return PAGE_KEEP;
  390. if (!mapping) {
  391. /*
  392. * Some data journaling orphaned pages can have
  393. * page->mapping == NULL while being dirty with clean buffers.
  394. */
  395. if (page_has_private(page)) {
  396. if (try_to_free_buffers(page)) {
  397. ClearPageDirty(page);
  398. printk("%s: orphaned page\n", __func__);
  399. return PAGE_CLEAN;
  400. }
  401. }
  402. return PAGE_KEEP;
  403. }
  404. if (mapping->a_ops->writepage == NULL)
  405. return PAGE_ACTIVATE;
  406. if (!may_write_to_queue(mapping->backing_dev_info, sc))
  407. return PAGE_KEEP;
  408. if (clear_page_dirty_for_io(page)) {
  409. int res;
  410. struct writeback_control wbc = {
  411. .sync_mode = WB_SYNC_NONE,
  412. .nr_to_write = SWAP_CLUSTER_MAX,
  413. .range_start = 0,
  414. .range_end = LLONG_MAX,
  415. .for_reclaim = 1,
  416. };
  417. SetPageReclaim(page);
  418. res = mapping->a_ops->writepage(page, &wbc);
  419. if (res < 0)
  420. handle_write_error(mapping, page, res);
  421. if (res == AOP_WRITEPAGE_ACTIVATE) {
  422. ClearPageReclaim(page);
  423. return PAGE_ACTIVATE;
  424. }
  425. if (!PageWriteback(page)) {
  426. /* synchronous write or broken a_ops? */
  427. ClearPageReclaim(page);
  428. }
  429. trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
  430. inc_zone_page_state(page, NR_VMSCAN_WRITE);
  431. return PAGE_SUCCESS;
  432. }
  433. return PAGE_CLEAN;
  434. }
  435. /*
  436. * Same as remove_mapping, but if the page is removed from the mapping, it
  437. * gets returned with a refcount of 0.
  438. */
  439. static int __remove_mapping(struct address_space *mapping, struct page *page)
  440. {
  441. BUG_ON(!PageLocked(page));
  442. BUG_ON(mapping != page_mapping(page));
  443. spin_lock_irq(&mapping->tree_lock);
  444. /*
  445. * The non racy check for a busy page.
  446. *
  447. * Must be careful with the order of the tests. When someone has
  448. * a ref to the page, it may be possible that they dirty it then
  449. * drop the reference. So if PageDirty is tested before page_count
  450. * here, then the following race may occur:
  451. *
  452. * get_user_pages(&page);
  453. * [user mapping goes away]
  454. * write_to(page);
  455. * !PageDirty(page) [good]
  456. * SetPageDirty(page);
  457. * put_page(page);
  458. * !page_count(page) [good, discard it]
  459. *
  460. * [oops, our write_to data is lost]
  461. *
  462. * Reversing the order of the tests ensures such a situation cannot
  463. * escape unnoticed. The smp_rmb is needed to ensure the page->flags
  464. * load is not satisfied before that of page->_count.
  465. *
  466. * Note that if SetPageDirty is always performed via set_page_dirty,
  467. * and thus under tree_lock, then this ordering is not required.
  468. */
  469. if (!page_freeze_refs(page, 2))
  470. goto cannot_free;
  471. /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
  472. if (unlikely(PageDirty(page))) {
  473. page_unfreeze_refs(page, 2);
  474. goto cannot_free;
  475. }
  476. if (PageSwapCache(page)) {
  477. swp_entry_t swap = { .val = page_private(page) };
  478. __delete_from_swap_cache(page);
  479. spin_unlock_irq(&mapping->tree_lock);
  480. swapcache_free(swap, page);
  481. } else {
  482. void (*freepage)(struct page *);
  483. freepage = mapping->a_ops->freepage;
  484. __delete_from_page_cache(page);
  485. spin_unlock_irq(&mapping->tree_lock);
  486. mem_cgroup_uncharge_cache_page(page);
  487. if (freepage != NULL)
  488. freepage(page);
  489. }
  490. return 1;
  491. cannot_free:
  492. spin_unlock_irq(&mapping->tree_lock);
  493. return 0;
  494. }
  495. /*
  496. * Attempt to detach a locked page from its ->mapping. If it is dirty or if
  497. * someone else has a ref on the page, abort and return 0. If it was
  498. * successfully detached, return 1. Assumes the caller has a single ref on
  499. * this page.
  500. */
  501. int remove_mapping(struct address_space *mapping, struct page *page)
  502. {
  503. if (__remove_mapping(mapping, page)) {
  504. /*
  505. * Unfreezing the refcount with 1 rather than 2 effectively
  506. * drops the pagecache ref for us without requiring another
  507. * atomic operation.
  508. */
  509. page_unfreeze_refs(page, 1);
  510. return 1;
  511. }
  512. return 0;
  513. }
  514. /**
  515. * putback_lru_page - put previously isolated page onto appropriate LRU list
  516. * @page: page to be put back to appropriate lru list
  517. *
  518. * Add previously isolated @page to appropriate LRU list.
  519. * Page may still be unevictable for other reasons.
  520. *
  521. * lru_lock must not be held, interrupts must be enabled.
  522. */
  523. void putback_lru_page(struct page *page)
  524. {
  525. bool is_unevictable;
  526. int was_unevictable = PageUnevictable(page);
  527. VM_BUG_ON(PageLRU(page));
  528. redo:
  529. ClearPageUnevictable(page);
  530. if (page_evictable(page)) {
  531. /*
  532. * For evictable pages, we can use the cache.
  533. * In event of a race, worst case is we end up with an
  534. * unevictable page on [in]active list.
  535. * We know how to handle that.
  536. */
  537. is_unevictable = false;
  538. lru_cache_add(page);
  539. } else {
  540. /*
  541. * Put unevictable pages directly on zone's unevictable
  542. * list.
  543. */
  544. is_unevictable = true;
  545. add_page_to_unevictable_list(page);
  546. /*
  547. * When racing with an mlock or AS_UNEVICTABLE clearing
  548. * (page is unlocked) make sure that if the other thread
  549. * does not observe our setting of PG_lru and fails
  550. * isolation/check_move_unevictable_pages,
  551. * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
  552. * the page back to the evictable list.
  553. *
  554. * The other side is TestClearPageMlocked() or shmem_lock().
  555. */
  556. smp_mb();
  557. }
  558. /*
  559. * page's status can change while we move it among the lru lists. If an
  560. * evictable page is on the unevictable list, it will never be freed.
  561. * To avoid that, check again after we have added it to the list.
  562. */
  563. if (is_unevictable && page_evictable(page)) {
  564. if (!isolate_lru_page(page)) {
  565. put_page(page);
  566. goto redo;
  567. }
  568. /* This means someone else dropped this page from the LRU,
  569. * so it will be freed or put back to the LRU again. There is
  570. * nothing to do here.
  571. */
  572. }
  573. if (was_unevictable && !is_unevictable)
  574. count_vm_event(UNEVICTABLE_PGRESCUED);
  575. else if (!was_unevictable && is_unevictable)
  576. count_vm_event(UNEVICTABLE_PGCULLED);
  577. put_page(page); /* drop ref from isolate */
  578. }
  579. enum page_references {
  580. PAGEREF_RECLAIM,
  581. PAGEREF_RECLAIM_CLEAN,
  582. PAGEREF_KEEP,
  583. PAGEREF_ACTIVATE,
  584. };
  585. static enum page_references page_check_references(struct page *page,
  586. struct scan_control *sc)
  587. {
  588. int referenced_ptes, referenced_page;
  589. unsigned long vm_flags;
  590. referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
  591. &vm_flags);
  592. referenced_page = TestClearPageReferenced(page);
  593. /*
  594. * Mlock lost the isolation race with us. Let try_to_unmap()
  595. * move the page to the unevictable list.
  596. */
  597. if (vm_flags & VM_LOCKED)
  598. return PAGEREF_RECLAIM;
  599. if (referenced_ptes) {
  600. if (PageSwapBacked(page))
  601. return PAGEREF_ACTIVATE;
  602. /*
  603. * All mapped pages start out with page table
  604. * references from the instantiating fault, so we need
  605. * to look twice if a mapped file page is used more
  606. * than once.
  607. *
  608. * Mark it and spare it for another trip around the
  609. * inactive list. Another page table reference will
  610. * lead to its activation.
  611. *
  612. * Note: the mark is set for activated pages as well
  613. * so that recently deactivated but used pages are
  614. * quickly recovered.
  615. */
  616. SetPageReferenced(page);
  617. if (referenced_page || referenced_ptes > 1)
  618. return PAGEREF_ACTIVATE;
  619. /*
  620. * Activate file-backed executable pages after first usage.
  621. */
  622. if (vm_flags & VM_EXEC)
  623. return PAGEREF_ACTIVATE;
  624. return PAGEREF_KEEP;
  625. }
  626. /* Reclaim if clean, defer dirty pages to writeback */
  627. if (referenced_page && !PageSwapBacked(page))
  628. return PAGEREF_RECLAIM_CLEAN;
  629. return PAGEREF_RECLAIM;
  630. }
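/*
 * Summary of the decision above (derived from the code, added for
 * clarity).  VM_LOCKED vmas short-circuit everything to PAGEREF_RECLAIM
 * so that try_to_unmap() can move the page to the unevictable list.
 *
 *	no pte refs, !PG_referenced                     -> PAGEREF_RECLAIM
 *	no pte refs, PG_referenced, !PageSwapBacked     -> PAGEREF_RECLAIM_CLEAN
 *	no pte refs, PG_referenced, PageSwapBacked      -> PAGEREF_RECLAIM
 *	pte refs, PageSwapBacked                        -> PAGEREF_ACTIVATE
 *	>1 pte ref or PG_referenced, !PageSwapBacked    -> PAGEREF_ACTIVATE
 *	one pte ref, !PG_referenced, VM_EXEC            -> PAGEREF_ACTIVATE
 *	one pte ref, !PG_referenced, otherwise          -> PAGEREF_KEEP
 */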
  631. /* Check if a page is dirty or under writeback */
  632. static void page_check_dirty_writeback(struct page *page,
  633. bool *dirty, bool *writeback)
  634. {
  635. struct address_space *mapping;
  636. /*
  637. * Anonymous pages are not handled by flushers and must be written
  638. * from reclaim context. Do not stall reclaim based on them
  639. */
  640. if (!page_is_file_cache(page)) {
  641. *dirty = false;
  642. *writeback = false;
  643. return;
  644. }
  645. /* By default assume that the page flags are accurate */
  646. *dirty = PageDirty(page);
  647. *writeback = PageWriteback(page);
  648. /* Verify dirty/writeback state if the filesystem supports it */
  649. if (!page_has_private(page))
  650. return;
  651. mapping = page_mapping(page);
  652. if (mapping && mapping->a_ops->is_dirty_writeback)
  653. mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
  654. }
  655. /*
  656. * shrink_page_list() returns the number of reclaimed pages
  657. */
  658. static unsigned long shrink_page_list(struct list_head *page_list,
  659. struct zone *zone,
  660. struct scan_control *sc,
  661. enum ttu_flags ttu_flags,
  662. unsigned long *ret_nr_dirty,
  663. unsigned long *ret_nr_unqueued_dirty,
  664. unsigned long *ret_nr_congested,
  665. unsigned long *ret_nr_writeback,
  666. unsigned long *ret_nr_immediate,
  667. bool force_reclaim)
  668. {
  669. LIST_HEAD(ret_pages);
  670. LIST_HEAD(free_pages);
  671. int pgactivate = 0;
  672. unsigned long nr_unqueued_dirty = 0;
  673. unsigned long nr_dirty = 0;
  674. unsigned long nr_congested = 0;
  675. unsigned long nr_reclaimed = 0;
  676. unsigned long nr_writeback = 0;
  677. unsigned long nr_immediate = 0;
  678. cond_resched();
  679. mem_cgroup_uncharge_start();
  680. while (!list_empty(page_list)) {
  681. struct address_space *mapping;
  682. struct page *page;
  683. int may_enter_fs;
  684. enum page_references references = PAGEREF_RECLAIM_CLEAN;
  685. bool dirty, writeback;
  686. cond_resched();
  687. page = lru_to_page(page_list);
  688. list_del(&page->lru);
  689. if (!trylock_page(page))
  690. goto keep;
  691. VM_BUG_ON(PageActive(page));
  692. VM_BUG_ON(page_zone(page) != zone);
  693. sc->nr_scanned++;
  694. if (unlikely(!page_evictable(page)))
  695. goto cull_mlocked;
  696. if (!sc->may_unmap && page_mapped(page))
  697. goto keep_locked;
  698. /* Double the slab pressure for mapped and swapcache pages */
  699. if (page_mapped(page) || PageSwapCache(page))
  700. sc->nr_scanned++;
  701. may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
  702. (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
  703. /*
  704. * The number of dirty pages determines if a zone is marked
  705. * reclaim_congested which affects wait_iff_congested. kswapd
  706. * will stall and start writing pages if the tail of the LRU
  707. * is all dirty unqueued pages.
  708. */
  709. page_check_dirty_writeback(page, &dirty, &writeback);
  710. if (dirty || writeback)
  711. nr_dirty++;
  712. if (dirty && !writeback)
  713. nr_unqueued_dirty++;
  714. /*
  715. * Treat this page as congested if the underlying BDI is or if
  716. * pages are cycling through the LRU so quickly that the
  717. * pages marked for immediate reclaim are making it to the
  718. * end of the LRU a second time.
  719. */
  720. mapping = page_mapping(page);
  721. if ((mapping && bdi_write_congested(mapping->backing_dev_info)) ||
  722. (writeback && PageReclaim(page)))
  723. nr_congested++;
  724. /*
  725. * If a page at the tail of the LRU is under writeback, there
  726. * are three cases to consider.
  727. *
  728. * 1) If reclaim is encountering an excessive number of pages
  729. * under writeback and this page is both under writeback and
  730. * PageReclaim then it indicates that pages are being queued
  731. * for IO but are being recycled through the LRU before the
  732. * IO can complete. Waiting on the page itself risks an
  733. * indefinite stall if it is impossible to writeback the
  734. * page due to IO error or disconnected storage so instead
  735. * note that the LRU is being scanned too quickly and the
  736. * caller can stall after page list has been processed.
  737. *
  738. * 2) Global reclaim encounters a page under writeback, or memcg
  739. * reclaim encounters a page that is not yet marked for immediate
  740. * reclaim, or the caller does not have __GFP_IO. In this case mark
  741. * the page for immediate reclaim and continue scanning.
  742. *
  743. * __GFP_IO is checked because a loop driver thread might
  744. * enter reclaim, and deadlock if it waits on a page for
  745. * which it is needed to do the write (loop masks off
  746. * __GFP_IO|__GFP_FS for this reason); but more thought
  747. * would probably show more reasons.
  748. *
  749. * Don't require __GFP_FS, since we're not going into the
  750. * FS, just waiting on its writeback completion. Worryingly,
  751. * ext4, gfs2 and xfs allocate pages with
  752. * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
  753. * may_enter_fs here is liable to OOM on them.
  754. *
  755. * 3) memcg encounters a page that is not already marked
  756. * PageReclaim. memcg does not have any dirty pages
  757. * throttling so we could easily OOM just because too many
  758. * pages are in writeback and there is nothing else to
  759. * reclaim. Wait for the writeback to complete.
  760. */
  761. if (PageWriteback(page)) {
  762. /* Case 1 above */
  763. if (current_is_kswapd() &&
  764. PageReclaim(page) &&
  765. zone_is_reclaim_writeback(zone)) {
  766. nr_immediate++;
  767. goto keep_locked;
  768. /* Case 2 above */
  769. } else if (global_reclaim(sc) ||
  770. !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
  771. /*
  772. * This is slightly racy - end_page_writeback()
  773. * might have just cleared PageReclaim, then
  774. * setting PageReclaim here ends up being interpreted
  775. * as PageReadahead - but that does not matter
  776. * enough to care. What we do want is for this
  777. * page to have PageReclaim set next time memcg
  778. * reclaim reaches the tests above, so it will
  779. * then wait_on_page_writeback() to avoid OOM;
  780. * and it's also appropriate in global reclaim.
  781. */
  782. SetPageReclaim(page);
  783. nr_writeback++;
  784. goto keep_locked;
  785. /* Case 3 above */
  786. } else {
  787. wait_on_page_writeback(page);
  788. }
  789. }
  790. if (!force_reclaim)
  791. references = page_check_references(page, sc);
  792. switch (references) {
  793. case PAGEREF_ACTIVATE:
  794. goto activate_locked;
  795. case PAGEREF_KEEP:
  796. goto keep_locked;
  797. case PAGEREF_RECLAIM:
  798. case PAGEREF_RECLAIM_CLEAN:
  799. ; /* try to reclaim the page below */
  800. }
  801. /*
  802. * Anonymous process memory has backing store?
  803. * Try to allocate it some swap space here.
  804. */
  805. if (PageAnon(page) && !PageSwapCache(page)) {
  806. if (!(sc->gfp_mask & __GFP_IO))
  807. goto keep_locked;
  808. if (!add_to_swap(page, page_list))
  809. goto activate_locked;
  810. may_enter_fs = 1;
  811. /* Adding to swap updated mapping */
  812. mapping = page_mapping(page);
  813. }
  814. /*
  815. * The page is mapped into the page tables of one or more
  816. * processes. Try to unmap it here.
  817. */
  818. if (page_mapped(page) && mapping) {
  819. switch (try_to_unmap(page, ttu_flags)) {
  820. case SWAP_FAIL:
  821. goto activate_locked;
  822. case SWAP_AGAIN:
  823. goto keep_locked;
  824. case SWAP_MLOCK:
  825. goto cull_mlocked;
  826. case SWAP_SUCCESS:
  827. ; /* try to free the page below */
  828. }
  829. }
  830. if (PageDirty(page)) {
  831. /*
  832. * Only kswapd can writeback filesystem pages to
  833. * avoid risk of stack overflow but only writeback
  834. * if many dirty pages have been encountered.
  835. */
  836. if (page_is_file_cache(page) &&
  837. (!current_is_kswapd() ||
  838. !zone_is_reclaim_dirty(zone))) {
  839. /*
  840. * Immediately reclaim when written back.
  841. * Similar in principle to deactivate_page()
  842. * except we already have the page isolated
  843. * and know it's dirty
  844. */
  845. inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
  846. SetPageReclaim(page);
  847. goto keep_locked;
  848. }
  849. if (references == PAGEREF_RECLAIM_CLEAN)
  850. goto keep_locked;
  851. if (!may_enter_fs)
  852. goto keep_locked;
  853. if (!sc->may_writepage)
  854. goto keep_locked;
  855. /* Page is dirty, try to write it out here */
  856. switch (pageout(page, mapping, sc)) {
  857. case PAGE_KEEP:
  858. goto keep_locked;
  859. case PAGE_ACTIVATE:
  860. goto activate_locked;
  861. case PAGE_SUCCESS:
  862. if (PageWriteback(page))
  863. goto keep;
  864. if (PageDirty(page))
  865. goto keep;
  866. /*
  867. * A synchronous write - probably a ramdisk. Go
  868. * ahead and try to reclaim the page.
  869. */
  870. if (!trylock_page(page))
  871. goto keep;
  872. if (PageDirty(page) || PageWriteback(page))
  873. goto keep_locked;
  874. mapping = page_mapping(page);
  875. case PAGE_CLEAN:
  876. ; /* try to free the page below */
  877. }
  878. }
  879. /*
  880. * If the page has buffers, try to free the buffer mappings
  881. * associated with this page. If we succeed we try to free
  882. * the page as well.
  883. *
  884. * We do this even if the page is PageDirty().
  885. * try_to_release_page() does not perform I/O, but it is
  886. * possible for a page to have PageDirty set, but it is actually
  887. * clean (all its buffers are clean). This happens if the
  888. * buffers were written out directly, with submit_bh(). ext3
  889. * will do this, as well as the blockdev mapping.
  890. * try_to_release_page() will discover that cleanness and will
  891. * drop the buffers and mark the page clean - it can be freed.
  892. *
  893. * Rarely, pages can have buffers and no ->mapping. These are
  894. * the pages which were not successfully invalidated in
  895. * truncate_complete_page(). We try to drop those buffers here
  896. * and if that worked, and the page is no longer mapped into
  897. * process address space (page_count == 1) it can be freed.
  898. * Otherwise, leave the page on the LRU so it is swappable.
  899. */
  900. if (page_has_private(page)) {
  901. if (!try_to_release_page(page, sc->gfp_mask))
  902. goto activate_locked;
  903. if (!mapping && page_count(page) == 1) {
  904. unlock_page(page);
  905. if (put_page_testzero(page))
  906. goto free_it;
  907. else {
  908. /*
  909. * rare race with speculative reference.
  910. * the speculative reference will free
  911. * this page shortly, so we may
  912. * increment nr_reclaimed here (and
  913. * leave it off the LRU).
  914. */
  915. nr_reclaimed++;
  916. continue;
  917. }
  918. }
  919. }
  920. if (!mapping || !__remove_mapping(mapping, page))
  921. goto keep_locked;
  922. /*
  923. * At this point, we have no other references and there is
  924. * no way to pick any more up (removed from LRU, removed
  925. * from pagecache). Can use non-atomic bitops now (and
  926. * we obviously don't have to worry about waking up a process
  927. * waiting on the page lock, because there are no references.
  928. */
  929. __clear_page_locked(page);
  930. free_it:
  931. nr_reclaimed++;
  932. /*
  933. * Is there need to periodically free_page_list? It would
  934. * appear not as the counts should be low
  935. */
  936. list_add(&page->lru, &free_pages);
  937. continue;
  938. cull_mlocked:
  939. if (PageSwapCache(page))
  940. try_to_free_swap(page);
  941. unlock_page(page);
  942. putback_lru_page(page);
  943. continue;
  944. activate_locked:
  945. /* Not a candidate for swapping, so reclaim swap space. */
  946. if (PageSwapCache(page) && vm_swap_full())
  947. try_to_free_swap(page);
  948. VM_BUG_ON(PageActive(page));
  949. SetPageActive(page);
  950. pgactivate++;
  951. keep_locked:
  952. unlock_page(page);
  953. keep:
  954. list_add(&page->lru, &ret_pages);
  955. VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
  956. }
  957. free_hot_cold_page_list(&free_pages, 1);
  958. list_splice(&ret_pages, page_list);
  959. count_vm_events(PGACTIVATE, pgactivate);
  960. mem_cgroup_uncharge_end();
  961. *ret_nr_dirty += nr_dirty;
  962. *ret_nr_congested += nr_congested;
  963. *ret_nr_unqueued_dirty += nr_unqueued_dirty;
  964. *ret_nr_writeback += nr_writeback;
  965. *ret_nr_immediate += nr_immediate;
  966. return nr_reclaimed;
  967. }
  968. unsigned long reclaim_clean_pages_from_list(struct zone *zone,
  969. struct list_head *page_list)
  970. {
  971. struct scan_control sc = {
  972. .gfp_mask = GFP_KERNEL,
  973. .priority = DEF_PRIORITY,
  974. .may_unmap = 1,
  975. };
  976. unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
  977. struct page *page, *next;
  978. LIST_HEAD(clean_pages);
  979. list_for_each_entry_safe(page, next, page_list, lru) {
  980. if (page_is_file_cache(page) && !PageDirty(page)) {
  981. ClearPageActive(page);
  982. list_move(&page->lru, &clean_pages);
  983. }
  984. }
  985. ret = shrink_page_list(&clean_pages, zone, &sc,
  986. TTU_UNMAP|TTU_IGNORE_ACCESS,
  987. &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
  988. list_splice(&clean_pages, page_list);
  989. __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
  990. return ret;
  991. }
  992. /*
  993. * Attempt to remove the specified page from its LRU. Only take this page
  994. * if it is of the appropriate PageActive status. Pages which are being
  995. * freed elsewhere are also ignored.
  996. *
  997. * page: page to consider
  998. * mode: one of the LRU isolation modes defined above
  999. *
  1000. * returns 0 on success, -ve errno on failure.
  1001. */
  1002. int __isolate_lru_page(struct page *page, isolate_mode_t mode)
  1003. {
  1004. int ret = -EINVAL;
  1005. /* Only take pages on the LRU. */
  1006. if (!PageLRU(page))
  1007. return ret;
  1008. /* Compaction should not handle unevictable pages but CMA can do so */
  1009. if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
  1010. return ret;
  1011. ret = -EBUSY;
  1012. /*
  1013. * To minimise LRU disruption, the caller can indicate that it only
  1014. * wants to isolate pages it will be able to operate on without
  1015. * blocking - clean pages for the most part.
  1016. *
  1017. * ISOLATE_CLEAN means that only clean pages should be isolated. This
  1018. * is used by reclaim when it cannot write to backing storage.
  1019. *
  1020. * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to isolate
  1021. * pages that it is possible to migrate without blocking.
  1022. */
  1023. if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
  1024. /* All the caller can do on PageWriteback is block */
  1025. if (PageWriteback(page))
  1026. return ret;
  1027. if (PageDirty(page)) {
  1028. struct address_space *mapping;
  1029. /* ISOLATE_CLEAN means only clean pages */
  1030. if (mode & ISOLATE_CLEAN)
  1031. return ret;
  1032. /*
  1033. * Only pages without mappings or that have a
  1034. * ->migratepage callback are possible to migrate
  1035. * without blocking
  1036. */
  1037. mapping = page_mapping(page);
  1038. if (mapping && !mapping->a_ops->migratepage)
  1039. return ret;
  1040. }
  1041. }
  1042. if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
  1043. return ret;
  1044. if (likely(get_page_unless_zero(page))) {
  1045. /*
  1046. * Be careful not to clear PageLRU until after we're
  1047. * sure the page is not being freed elsewhere -- the
  1048. * page release code relies on it.
  1049. */
  1050. ClearPageLRU(page);
  1051. ret = 0;
  1052. }
  1053. return ret;
  1054. }
  1055. /*
  1056. * zone->lru_lock is heavily contended. Some of the functions that
  1057. * shrink the lists perform better by taking out a batch of pages
  1058. * and working on them outside the LRU lock.
  1059. *
  1060. * For pagecache intensive workloads, this function is the hottest
  1061. * spot in the kernel (apart from copy_*_user functions).
  1062. *
  1063. * Appropriate locks must be held before calling this function.
  1064. *
  1065. * @nr_to_scan: The number of pages to look through on the list.
  1066. * @lruvec: The LRU vector to pull pages from.
  1067. * @dst: The temp list to put pages on to.
  1068. * @nr_scanned: The number of pages that were scanned.
  1069. * @sc: The scan_control struct for this reclaim session
  1070. * @mode: One of the LRU isolation modes
  1071. * @lru: LRU list id for isolating
  1072. *
  1073. * returns how many pages were moved onto *@dst.
  1074. */
  1075. static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
  1076. struct lruvec *lruvec, struct list_head *dst,
  1077. unsigned long *nr_scanned, struct scan_control *sc,
  1078. isolate_mode_t mode, enum lru_list lru)
  1079. {
  1080. struct list_head *src = &lruvec->lists[lru];
  1081. unsigned long nr_taken = 0;
  1082. unsigned long scan;
  1083. for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
  1084. struct page *page;
  1085. int nr_pages;
  1086. page = lru_to_page(src);
  1087. prefetchw_prev_lru_page(page, src, flags);
  1088. VM_BUG_ON(!PageLRU(page));
  1089. switch (__isolate_lru_page(page, mode)) {
  1090. case 0:
  1091. nr_pages = hpage_nr_pages(page);
  1092. mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
  1093. list_move(&page->lru, dst);
  1094. nr_taken += nr_pages;
  1095. break;
  1096. case -EBUSY:
  1097. /* else it is being freed elsewhere */
  1098. list_move(&page->lru, src);
  1099. continue;
  1100. default:
  1101. BUG();
  1102. }
  1103. }
  1104. *nr_scanned = scan;
  1105. trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
  1106. nr_taken, mode, is_file_lru(lru));
  1107. return nr_taken;
  1108. }
  1109. /**
  1110. * isolate_lru_page - tries to isolate a page from its LRU list
  1111. * @page: page to isolate from its LRU list
  1112. *
  1113. * Isolates a @page from an LRU list, clears PageLRU and adjusts the
  1114. * vmstat statistic corresponding to whatever LRU list the page was on.
  1115. *
  1116. * Returns 0 if the page was removed from an LRU list.
  1117. * Returns -EBUSY if the page was not on an LRU list.
  1118. *
  1119. * The returned page will have PageLRU() cleared. If it was found on
  1120. * the active list, it will have PageActive set. If it was found on
  1121. * the unevictable list, it will have the PageUnevictable bit set. That flag
  1122. * may need to be cleared by the caller before letting the page go.
  1123. *
  1124. * The vmstat statistic corresponding to the list on which the page was
  1125. * found will be decremented.
  1126. *
  1127. * Restrictions:
  1128. * (1) Must be called with an elevated refcount on the page. This is a
  1129. * fundamental difference from isolate_lru_pages (which is called
  1130. * without a stable reference).
  1131. * (2) the lru_lock must not be held.
  1132. * (3) interrupts must be enabled.
  1133. */
  1134. int isolate_lru_page(struct page *page)
  1135. {
  1136. int ret = -EBUSY;
  1137. VM_BUG_ON(!page_count(page));
  1138. if (PageLRU(page)) {
  1139. struct zone *zone = page_zone(page);
  1140. struct lruvec *lruvec;
  1141. spin_lock_irq(&zone->lru_lock);
  1142. lruvec = mem_cgroup_page_lruvec(page, zone);
  1143. if (PageLRU(page)) {
  1144. int lru = page_lru(page);
  1145. get_page(page);
  1146. ClearPageLRU(page);
  1147. del_page_from_lru_list(page, lruvec, lru);
  1148. ret = 0;
  1149. }
  1150. spin_unlock_irq(&zone->lru_lock);
  1151. }
  1152. return ret;
  1153. }
  1154. /*
  1155. * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
  1156. * then get rescheduled. When there is a massive number of tasks doing page
  1157. * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
  1158. * the LRU list will shrink and be scanned faster than necessary, leading to
  1159. * unnecessary swapping, thrashing and OOM.
  1160. */
  1161. static int too_many_isolated(struct zone *zone, int file,
  1162. struct scan_control *sc)
  1163. {
  1164. unsigned long inactive, isolated;
  1165. if (current_is_kswapd())
  1166. return 0;
  1167. if (!global_reclaim(sc))
  1168. return 0;
  1169. if (file) {
  1170. inactive = zone_page_state(zone, NR_INACTIVE_FILE);
  1171. isolated = zone_page_state(zone, NR_ISOLATED_FILE);
  1172. } else {
  1173. inactive = zone_page_state(zone, NR_INACTIVE_ANON);
  1174. isolated = zone_page_state(zone, NR_ISOLATED_ANON);
  1175. }
  1176. /*
  1177. * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
  1178. * won't get blocked by normal direct-reclaimers, forming a circular
  1179. * deadlock.
  1180. */
  1181. if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS)
  1182. inactive >>= 3;
  1183. return isolated > inactive;
  1184. }
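/*
 * Illustration with hypothetical numbers: with 8000 pages on the inactive
 * file list, an ordinary direct reclaimer (both __GFP_IO and __GFP_FS set)
 * is reported as "too many isolated" once more than 1000 file pages
 * (8000 >> 3) sit isolated, while a GFP_NOFS/GFP_NOIO caller is only
 * reported once isolated pages exceed the full 8000, so it cannot deadlock
 * behind the normal reclaimers it may be cleaning pages for.
 */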
  1185. static noinline_for_stack void
  1186. putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
  1187. {
  1188. struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
  1189. struct zone *zone = lruvec_zone(lruvec);
  1190. LIST_HEAD(pages_to_free);
  1191. /*
  1192. * Put back any unfreeable pages.
  1193. */
  1194. while (!list_empty(page_list)) {
  1195. struct page *page = lru_to_page(page_list);
  1196. int lru;
  1197. VM_BUG_ON(PageLRU(page));
  1198. list_del(&page->lru);
  1199. if (unlikely(!page_evictable(page))) {
  1200. spin_unlock_irq(&zone->lru_lock);
  1201. putback_lru_page(page);
  1202. spin_lock_irq(&zone->lru_lock);
  1203. continue;
  1204. }
  1205. lruvec = mem_cgroup_page_lruvec(page, zone);
  1206. SetPageLRU(page);
  1207. lru = page_lru(page);
  1208. add_page_to_lru_list(page, lruvec, lru);
  1209. if (is_active_lru(lru)) {
  1210. int file = is_file_lru(lru);
  1211. int numpages = hpage_nr_pages(page);
  1212. reclaim_stat->recent_rotated[file] += numpages;
  1213. }
  1214. if (put_page_testzero(page)) {
  1215. __ClearPageLRU(page);
  1216. __ClearPageActive(page);
  1217. del_page_from_lru_list(page, lruvec, lru);
  1218. if (unlikely(PageCompound(page))) {
  1219. spin_unlock_irq(&zone->lru_lock);
  1220. (*get_compound_page_dtor(page))(page);
  1221. spin_lock_irq(&zone->lru_lock);
  1222. } else
  1223. list_add(&page->lru, &pages_to_free);
  1224. }
  1225. }
  1226. /*
1227. * To save our caller's stack, reuse the input list for the pages to free.
  1228. */
  1229. list_splice(&pages_to_free, page_list);
  1230. }
  1231. /*
  1232. * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
  1233. * of reclaimed pages
  1234. */
  1235. static noinline_for_stack unsigned long
  1236. shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
  1237. struct scan_control *sc, enum lru_list lru)
  1238. {
  1239. LIST_HEAD(page_list);
  1240. unsigned long nr_scanned;
  1241. unsigned long nr_reclaimed = 0;
  1242. unsigned long nr_taken;
  1243. unsigned long nr_dirty = 0;
  1244. unsigned long nr_congested = 0;
  1245. unsigned long nr_unqueued_dirty = 0;
  1246. unsigned long nr_writeback = 0;
  1247. unsigned long nr_immediate = 0;
  1248. isolate_mode_t isolate_mode = 0;
  1249. int file = is_file_lru(lru);
  1250. struct zone *zone = lruvec_zone(lruvec);
  1251. struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
  1252. while (unlikely(too_many_isolated(zone, file, sc))) {
  1253. congestion_wait(BLK_RW_ASYNC, HZ/10);
  1254. /* We are about to die and free our memory. Return now. */
  1255. if (fatal_signal_pending(current))
  1256. return SWAP_CLUSTER_MAX;
  1257. }
  1258. lru_add_drain();
  1259. if (!sc->may_unmap)
  1260. isolate_mode |= ISOLATE_UNMAPPED;
  1261. if (!sc->may_writepage)
  1262. isolate_mode |= ISOLATE_CLEAN;
  1263. spin_lock_irq(&zone->lru_lock);
  1264. nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
  1265. &nr_scanned, sc, isolate_mode, lru);
  1266. __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
  1267. __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
  1268. if (global_reclaim(sc)) {
  1269. zone->pages_scanned += nr_scanned;
  1270. if (current_is_kswapd())
  1271. __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
  1272. else
  1273. __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
  1274. }
  1275. spin_unlock_irq(&zone->lru_lock);
  1276. if (nr_taken == 0)
  1277. return 0;
  1278. nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
  1279. &nr_dirty, &nr_unqueued_dirty, &nr_congested,
  1280. &nr_writeback, &nr_immediate,
  1281. false);
  1282. spin_lock_irq(&zone->lru_lock);
  1283. reclaim_stat->recent_scanned[file] += nr_taken;
  1284. if (global_reclaim(sc)) {
  1285. if (current_is_kswapd())
  1286. __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
  1287. nr_reclaimed);
  1288. else
  1289. __count_zone_vm_events(PGSTEAL_DIRECT, zone,
  1290. nr_reclaimed);
  1291. }
  1292. putback_inactive_pages(lruvec, &page_list);
  1293. __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
  1294. spin_unlock_irq(&zone->lru_lock);
  1295. free_hot_cold_page_list(&page_list, 1);
  1296. /*
  1297. * If reclaim is isolating dirty pages under writeback, it implies
  1298. * that the long-lived page allocation rate is exceeding the page
  1299. * laundering rate. Either the global limits are not being effective
  1300. * at throttling processes due to the page distribution throughout
  1301. * zones or there is heavy usage of a slow backing device. The
  1302. * only option is to throttle from reclaim context which is not ideal
  1303. * as there is no guarantee the dirtying process is throttled in the
  1304. * same way balance_dirty_pages() manages.
  1305. *
  1306. * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
1307. * of pages under writeback that are flagged for immediate reclaim and stall if any
  1308. * are encountered in the nr_immediate check below.
  1309. */
  1310. if (nr_writeback && nr_writeback == nr_taken)
  1311. zone_set_flag(zone, ZONE_WRITEBACK);
  1312. /*
  1313. * memcg will stall in page writeback so only consider forcibly
  1314. * stalling for global reclaim
  1315. */
  1316. if (global_reclaim(sc)) {
  1317. /*
  1318. * Tag a zone as congested if all the dirty pages scanned were
  1319. * backed by a congested BDI and wait_iff_congested will stall.
  1320. */
  1321. if (nr_dirty && nr_dirty == nr_congested)
  1322. zone_set_flag(zone, ZONE_CONGESTED);
  1323. /*
  1324. * If dirty pages are scanned that are not queued for IO, it
  1325. * implies that flushers are not keeping up. In this case, flag
  1326. * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
  1327. * pages from reclaim context. It will forcibly stall in the
  1328. * next check.
  1329. */
  1330. if (nr_unqueued_dirty == nr_taken)
  1331. zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
  1332. /*
1333. * In addition, if kswapd scans pages marked for
  1334. * immediate reclaim and under writeback (nr_immediate), it
  1335. * implies that pages are cycling through the LRU faster than
  1336. * they are written so also forcibly stall.
  1337. */
  1338. if (nr_unqueued_dirty == nr_taken || nr_immediate)
  1339. congestion_wait(BLK_RW_ASYNC, HZ/10);
  1340. }
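/*
 * Example of the stall logic above (hypothetical numbers): if all of the
 * nr_taken pages turned out to be dirty and none were queued for IO,
 * nr_unqueued_dirty == nr_taken, so the zone is tagged ZONE_TAIL_LRU_DIRTY
 * and this global reclaimer sleeps in congestion_wait() for up to HZ/10,
 * i.e. roughly 100ms, before putting pressure on the LRU again.
 */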
  1341. /*
  1342. * Stall direct reclaim for IO completions if underlying BDIs or zone
  1343. * is congested. Allow kswapd to continue until it starts encountering
  1344. * unqueued dirty pages or cycling through the LRU too quickly.
  1345. */
  1346. if (!sc->hibernation_mode && !current_is_kswapd())
  1347. wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
  1348. trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
  1349. zone_idx(zone),
  1350. nr_scanned, nr_reclaimed,
  1351. sc->priority,
  1352. trace_shrink_flags(file));
  1353. return nr_reclaimed;
  1354. }
  1355. /*
  1356. * This moves pages from the active list to the inactive list.
  1357. *
  1358. * We move them the other way if the page is referenced by one or more
  1359. * processes, from rmap.
  1360. *
  1361. * If the pages are mostly unmapped, the processing is fast and it is
  1362. * appropriate to hold zone->lru_lock across the whole operation. But if
  1363. * the pages are mapped, the processing is slow (page_referenced()) so we
  1364. * should drop zone->lru_lock around each page. It's impossible to balance
  1365. * this, so instead we remove the pages from the LRU while processing them.
  1366. * It is safe to rely on PG_active against the non-LRU pages in here because
  1367. * nobody will play with that bit on a non-LRU page.
  1368. *
  1369. * The downside is that we have to touch page->_count against each page.
  1370. * But we had to alter page->flags anyway.
  1371. */
  1372. static void move_active_pages_to_lru(struct lruvec *lruvec,
  1373. struct list_head *list,
  1374. struct list_head *pages_to_free,
  1375. enum lru_list lru)
  1376. {
  1377. struct zone *zone = lruvec_zone(lruvec);
  1378. unsigned long pgmoved = 0;
  1379. struct page *page;
  1380. int nr_pages;
  1381. while (!list_empty(list)) {
  1382. page = lru_to_page(list);
  1383. lruvec = mem_cgroup_page_lruvec(page, zone);
  1384. VM_BUG_ON(PageLRU(page));
  1385. SetPageLRU(page);
  1386. nr_pages = hpage_nr_pages(page);
  1387. mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
  1388. list_move(&page->lru, &lruvec->lists[lru]);
  1389. pgmoved += nr_pages;
  1390. if (put_page_testzero(page)) {
  1391. __ClearPageLRU(page);
  1392. __ClearPageActive(page);
  1393. del_page_from_lru_list(page, lruvec, lru);
  1394. if (unlikely(PageCompound(page))) {
  1395. spin_unlock_irq(&zone->lru_lock);
  1396. (*get_compound_page_dtor(page))(page);
  1397. spin_lock_irq(&zone->lru_lock);
  1398. } else
  1399. list_add(&page->lru, pages_to_free);
  1400. }
  1401. }
  1402. __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
  1403. if (!is_active_lru(lru))
  1404. __count_vm_events(PGDEACTIVATE, pgmoved);
  1405. }
  1406. static void shrink_active_list(unsigned long nr_to_scan,
  1407. struct lruvec *lruvec,
  1408. struct scan_control *sc,
  1409. enum lru_list lru)
  1410. {
  1411. unsigned long nr_taken;
  1412. unsigned long nr_scanned;
  1413. unsigned long vm_flags;
  1414. LIST_HEAD(l_hold); /* The pages which were snipped off */
  1415. LIST_HEAD(l_active);
  1416. LIST_HEAD(l_inactive);
  1417. struct page *page;
  1418. struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
  1419. unsigned long nr_rotated = 0;
  1420. isolate_mode_t isolate_mode = 0;
  1421. int file = is_file_lru(lru);
  1422. struct zone *zone = lruvec_zone(lruvec);
  1423. lru_add_drain();
  1424. if (!sc->may_unmap)
  1425. isolate_mode |= ISOLATE_UNMAPPED;
  1426. if (!sc->may_writepage)
  1427. isolate_mode |= ISOLATE_CLEAN;
  1428. spin_lock_irq(&zone->lru_lock);
  1429. nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
  1430. &nr_scanned, sc, isolate_mode, lru);
  1431. if (global_reclaim(sc))
  1432. zone->pages_scanned += nr_scanned;
  1433. reclaim_stat->recent_scanned[file] += nr_taken;
  1434. __count_zone_vm_events(PGREFILL, zone, nr_scanned);
  1435. __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
  1436. __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
  1437. spin_unlock_irq(&zone->lru_lock);
  1438. while (!list_empty(&l_hold)) {
  1439. cond_resched();
  1440. page = lru_to_page(&l_hold);
  1441. list_del(&page->lru);
  1442. if (unlikely(!page_evictable(page))) {
  1443. putback_lru_page(page);
  1444. continue;
  1445. }
  1446. if (unlikely(buffer_heads_over_limit)) {
  1447. if (page_has_private(page) && trylock_page(page)) {
  1448. if (page_has_private(page))
  1449. try_to_release_page(page, 0);
  1450. unlock_page(page);
  1451. }
  1452. }
  1453. if (page_referenced(page, 0, sc->target_mem_cgroup,
  1454. &vm_flags)) {
  1455. nr_rotated += hpage_nr_pages(page);
  1456. /*
  1457. * Identify referenced, file-backed active pages and
1458. * give them one more trip around the active list, so
1459. * that executable code gets a better chance to stay in
  1460. * memory under moderate memory pressure. Anon pages
  1461. * are not likely to be evicted by use-once streaming
  1462. * IO, plus JVM can create lots of anon VM_EXEC pages,
  1463. * so we ignore them here.
  1464. */
  1465. if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
  1466. list_add(&page->lru, &l_active);
  1467. continue;
  1468. }
  1469. }
  1470. ClearPageActive(page); /* we are de-activating */
  1471. list_add(&page->lru, &l_inactive);
  1472. }
  1473. /*
  1474. * Move pages back to the lru list.
  1475. */
  1476. spin_lock_irq(&zone->lru_lock);
  1477. /*
  1478. * Count referenced pages from currently used mappings as rotated,
  1479. * even though only some of them are actually re-activated. This
  1480. * helps balance scan pressure between file and anonymous pages in
  1481. * get_scan_ratio.
  1482. */
  1483. reclaim_stat->recent_rotated[file] += nr_rotated;
  1484. move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
  1485. move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
  1486. __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
  1487. spin_unlock_irq(&zone->lru_lock);
  1488. free_hot_cold_page_list(&l_hold, 1);
  1489. }
  1490. #ifdef CONFIG_SWAP
  1491. static int inactive_anon_is_low_global(struct zone *zone)
  1492. {
  1493. unsigned long active, inactive;
  1494. active = zone_page_state(zone, NR_ACTIVE_ANON);
  1495. inactive = zone_page_state(zone, NR_INACTIVE_ANON);
  1496. if (inactive * zone->inactive_ratio < active)
  1497. return 1;
  1498. return 0;
  1499. }
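/*
 * Worked example, assuming an inactive_ratio of 3 (roughly what a 1GB zone
 * gets): with 600000 active and 150000 inactive anon pages,
 * 150000 * 3 = 450000 < 600000, so the inactive list is considered low and
 * some active anon pages should be deactivated.
 */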
  1500. /**
  1501. * inactive_anon_is_low - check if anonymous pages need to be deactivated
  1502. * @lruvec: LRU vector to check
  1503. *
  1504. * Returns true if the zone does not have enough inactive anon pages,
  1505. * meaning some active anon pages need to be deactivated.
  1506. */
  1507. static int inactive_anon_is_low(struct lruvec *lruvec)
  1508. {
  1509. /*
  1510. * If we don't have swap space, anonymous page deactivation
  1511. * is pointless.
  1512. */
  1513. if (!total_swap_pages)
  1514. return 0;
  1515. if (!mem_cgroup_disabled())
  1516. return mem_cgroup_inactive_anon_is_low(lruvec);
  1517. return inactive_anon_is_low_global(lruvec_zone(lruvec));
  1518. }
  1519. #else
  1520. static inline int inactive_anon_is_low(struct lruvec *lruvec)
  1521. {
  1522. return 0;
  1523. }
  1524. #endif
  1525. /**
  1526. * inactive_file_is_low - check if file pages need to be deactivated
  1527. * @lruvec: LRU vector to check
  1528. *
  1529. * When the system is doing streaming IO, memory pressure here
  1530. * ensures that active file pages get deactivated, until more
  1531. * than half of the file pages are on the inactive list.
  1532. *
  1533. * Once we get to that situation, protect the system's working
  1534. * set from being evicted by disabling active file page aging.
  1535. *
  1536. * This uses a different ratio than the anonymous pages, because
  1537. * the page cache uses a use-once replacement algorithm.
  1538. */
  1539. static int inactive_file_is_low(struct lruvec *lruvec)
  1540. {
  1541. unsigned long inactive;
  1542. unsigned long active;
  1543. inactive = get_lru_size(lruvec, LRU_INACTIVE_FILE);
  1544. active = get_lru_size(lruvec, LRU_ACTIVE_FILE);
  1545. return active > inactive;
  1546. }
  1547. static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
  1548. {
  1549. if (is_file_lru(lru))
  1550. return inactive_file_is_low(lruvec);
  1551. else
  1552. return inactive_anon_is_low(lruvec);
  1553. }
  1554. static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
  1555. struct lruvec *lruvec, struct scan_control *sc)
  1556. {
  1557. if (is_active_lru(lru)) {
  1558. if (inactive_list_is_low(lruvec, lru))
  1559. shrink_active_list(nr_to_scan, lruvec, sc, lru);
  1560. return 0;
  1561. }
  1562. return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
  1563. }
  1564. static int vmscan_swappiness(struct scan_control *sc)
  1565. {
  1566. if (global_reclaim(sc))
  1567. return vm_swappiness;
  1568. return mem_cgroup_swappiness(sc->target_mem_cgroup);
  1569. }
  1570. enum scan_balance {
  1571. SCAN_EQUAL,
  1572. SCAN_FRACT,
  1573. SCAN_ANON,
  1574. SCAN_FILE,
  1575. };
  1576. /*
  1577. * Determine how aggressively the anon and file LRU lists should be
  1578. * scanned. The relative value of each set of LRU lists is determined
  1579. * by looking at the fraction of the pages scanned we did rotate back
  1580. * onto the active list instead of evict.
  1581. *
  1582. * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
  1583. * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
  1584. */
  1585. static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
  1586. unsigned long *nr)
  1587. {
  1588. struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
  1589. u64 fraction[2];
  1590. u64 denominator = 0; /* gcc */
  1591. struct zone *zone = lruvec_zone(lruvec);
  1592. unsigned long anon_prio, file_prio;
  1593. enum scan_balance scan_balance;
  1594. unsigned long anon, file, free;
  1595. bool force_scan = false;
  1596. unsigned long ap, fp;
  1597. enum lru_list lru;
  1598. /*
  1599. * If the zone or memcg is small, nr[l] can be 0. This
  1600. * results in no scanning on this priority and a potential
  1601. * priority drop. Global direct reclaim can go to the next
  1602. * zone and tends to have no problems. Global kswapd is for
  1603. * zone balancing and it needs to scan a minimum amount. When
  1604. * reclaiming for a memcg, a priority drop can cause high
  1605. * latencies, so it's better to scan a minimum amount there as
  1606. * well.
  1607. */
  1608. if (current_is_kswapd() && !zone_reclaimable(zone))
  1609. force_scan = true;
  1610. if (!global_reclaim(sc))
  1611. force_scan = true;
  1612. /* If we have no swap space, do not bother scanning anon pages. */
  1613. if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
  1614. scan_balance = SCAN_FILE;
  1615. goto out;
  1616. }
  1617. /*
  1618. * Global reclaim will swap to prevent OOM even with no
  1619. * swappiness, but memcg users want to use this knob to
  1620. * disable swapping for individual groups completely when
  1621. * using the memory controller's swap limit feature would be
  1622. * too expensive.
  1623. */
  1624. if (!global_reclaim(sc) && !vmscan_swappiness(sc)) {
  1625. scan_balance = SCAN_FILE;
  1626. goto out;
  1627. }
  1628. /*
  1629. * Do not apply any pressure balancing cleverness when the
  1630. * system is close to OOM, scan both anon and file equally
  1631. * (unless the swappiness setting disagrees with swapping).
  1632. */
  1633. if (!sc->priority && vmscan_swappiness(sc)) {
  1634. scan_balance = SCAN_EQUAL;
  1635. goto out;
  1636. }
  1637. anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
  1638. get_lru_size(lruvec, LRU_INACTIVE_ANON);
  1639. file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
  1640. get_lru_size(lruvec, LRU_INACTIVE_FILE);
  1641. /*
  1642. * If it's foreseeable that reclaiming the file cache won't be
  1643. * enough to get the zone back into a desirable shape, we have
  1644. * to swap. Better start now and leave the - probably heavily
  1645. * thrashing - remaining file pages alone.
  1646. */
  1647. if (global_reclaim(sc)) {
  1648. free = zone_page_state(zone, NR_FREE_PAGES);
  1649. if (unlikely(file + free <= high_wmark_pages(zone))) {
  1650. scan_balance = SCAN_ANON;
  1651. goto out;
  1652. }
  1653. }
  1654. /*
  1655. * There is enough inactive page cache, do not reclaim
  1656. * anything from the anonymous working set right now.
  1657. */
  1658. if (!inactive_file_is_low(lruvec)) {
  1659. scan_balance = SCAN_FILE;
  1660. goto out;
  1661. }
  1662. scan_balance = SCAN_FRACT;
  1663. /*
  1664. * With swappiness at 100, anonymous and file have the same priority.
  1665. * This scanning priority is essentially the inverse of IO cost.
  1666. */
  1667. anon_prio = vmscan_swappiness(sc);
  1668. file_prio = 200 - anon_prio;
  1669. /*
  1670. * OK, so we have swap space and a fair amount of page cache
  1671. * pages. We use the recently rotated / recently scanned
  1672. * ratios to determine how valuable each cache is.
  1673. *
  1674. * Because workloads change over time (and to avoid overflow)
  1675. * we keep these statistics as a floating average, which ends
  1676. * up weighing recent references more than old ones.
  1677. *
  1678. * anon in [0], file in [1]
  1679. */
  1680. spin_lock_irq(&zone->lru_lock);
  1681. if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
  1682. reclaim_stat->recent_scanned[0] /= 2;
  1683. reclaim_stat->recent_rotated[0] /= 2;
  1684. }
  1685. if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
  1686. reclaim_stat->recent_scanned[1] /= 2;
  1687. reclaim_stat->recent_rotated[1] /= 2;
  1688. }
  1689. /*
  1690. * The amount of pressure on anon vs file pages is inversely
  1691. * proportional to the fraction of recently scanned pages on
  1692. * each list that were recently referenced and in active use.
  1693. */
  1694. ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
  1695. ap /= reclaim_stat->recent_rotated[0] + 1;
  1696. fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
  1697. fp /= reclaim_stat->recent_rotated[1] + 1;
  1698. spin_unlock_irq(&zone->lru_lock);
  1699. fraction[0] = ap;
  1700. fraction[1] = fp;
  1701. denominator = ap + fp + 1;
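/*
 * Worked example with the default swappiness of 60 and hypothetical
 * statistics: anon_prio = 60, file_prio = 140. If recent_scanned/rotated
 * are 1000/500 for anon and 1000/100 for file, then
 * ap = 60 * 1001 / 501 = 119 and fp = 140 * 1001 / 101 = 1387, giving a
 * denominator of 1507. Anon lists therefore receive roughly 8% and file
 * lists roughly 92% of the SCAN_FRACT scan pressure below.
 */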
  1702. out:
  1703. for_each_evictable_lru(lru) {
  1704. int file = is_file_lru(lru);
  1705. unsigned long size;
  1706. unsigned long scan;
  1707. size = get_lru_size(lruvec, lru);
  1708. scan = size >> sc->priority;
  1709. if (!scan && force_scan)
  1710. scan = min(size, SWAP_CLUSTER_MAX);
  1711. switch (scan_balance) {
  1712. case SCAN_EQUAL:
  1713. /* Scan lists relative to size */
  1714. break;
  1715. case SCAN_FRACT:
  1716. /*
  1717. * Scan types proportional to swappiness and
  1718. * their relative recent reclaim efficiency.
  1719. */
  1720. scan = div64_u64(scan * fraction[file], denominator);
  1721. break;
  1722. case SCAN_FILE:
  1723. case SCAN_ANON:
  1724. /* Scan one type exclusively */
  1725. if ((scan_balance == SCAN_FILE) != file)
  1726. scan = 0;
  1727. break;
  1728. default:
  1729. /* Look ma, no brain */
  1730. BUG();
  1731. }
  1732. nr[lru] = scan;
  1733. }
  1734. }
  1735. /*
  1736. * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
  1737. */
  1738. static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
  1739. {
  1740. unsigned long nr[NR_LRU_LISTS];
  1741. unsigned long targets[NR_LRU_LISTS];
  1742. unsigned long nr_to_scan;
  1743. enum lru_list lru;
  1744. unsigned long nr_reclaimed = 0;
  1745. unsigned long nr_to_reclaim = sc->nr_to_reclaim;
  1746. struct blk_plug plug;
  1747. bool scan_adjusted = false;
  1748. get_scan_count(lruvec, sc, nr);
  1749. /* Record the original scan target for proportional adjustments later */
  1750. memcpy(targets, nr, sizeof(nr));
  1751. blk_start_plug(&plug);
  1752. while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
  1753. nr[LRU_INACTIVE_FILE]) {
  1754. unsigned long nr_anon, nr_file, percentage;
  1755. unsigned long nr_scanned;
  1756. for_each_evictable_lru(lru) {
  1757. if (nr[lru]) {
  1758. nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
  1759. nr[lru] -= nr_to_scan;
  1760. nr_reclaimed += shrink_list(lru, nr_to_scan,
  1761. lruvec, sc);
  1762. }
  1763. }
  1764. if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
  1765. continue;
  1766. /*
  1767. * For global direct reclaim, reclaim only the number of pages
  1768. * requested. Less care is taken to scan proportionally as it
  1769. * is more important to minimise direct reclaim stall latency
  1770. * than it is to properly age the LRU lists.
  1771. */
  1772. if (global_reclaim(sc) && !current_is_kswapd())
  1773. break;
  1774. /*
  1775. * For kswapd and memcg, reclaim at least the number of pages
  1776. * requested. Ensure that the anon and file LRUs shrink
1777. * proportionally to what was requested by get_scan_count(). We
1778. * stop reclaiming one LRU and reduce the amount of scanning
  1779. * proportional to the original scan target.
  1780. */
  1781. nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
  1782. nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
  1783. if (nr_file > nr_anon) {
  1784. unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
  1785. targets[LRU_ACTIVE_ANON] + 1;
  1786. lru = LRU_BASE;
  1787. percentage = nr_anon * 100 / scan_target;
  1788. } else {
  1789. unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
  1790. targets[LRU_ACTIVE_FILE] + 1;
  1791. lru = LRU_FILE;
  1792. percentage = nr_file * 100 / scan_target;
  1793. }
  1794. /* Stop scanning the smaller of the LRU */
  1795. nr[lru] = 0;
  1796. nr[lru + LRU_ACTIVE] = 0;
  1797. /*
  1798. * Recalculate the other LRU scan count based on its original
  1799. * scan target and the percentage scanning already complete
  1800. */
  1801. lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
  1802. nr_scanned = targets[lru] - nr[lru];
  1803. nr[lru] = targets[lru] * (100 - percentage) / 100;
  1804. nr[lru] -= min(nr[lru], nr_scanned);
  1805. lru += LRU_ACTIVE;
  1806. nr_scanned = targets[lru] - nr[lru];
  1807. nr[lru] = targets[lru] * (100 - percentage) / 100;
  1808. nr[lru] -= min(nr[lru], nr_scanned);
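/*
 * Hypothetical example of the adjustment above: if the anon lists are the
 * smaller pair and only 19% of their combined target remains unscanned
 * (percentage = 19), the anon counts are zeroed and each file list's
 * remaining count becomes target * 81 / 100 minus what it has already
 * scanned, so file scanning also stops at roughly 81% of its target and
 * the anon/file balance chosen by get_scan_count() is preserved.
 */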
  1809. scan_adjusted = true;
  1810. }
  1811. blk_finish_plug(&plug);
  1812. sc->nr_reclaimed += nr_reclaimed;
  1813. /*
  1814. * Even if we did not try to evict anon pages at all, we want to
  1815. * rebalance the anon lru active/inactive ratio.
  1816. */
  1817. if (inactive_anon_is_low(lruvec))
  1818. shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
  1819. sc, LRU_ACTIVE_ANON);
  1820. throttle_vm_writeout(sc->gfp_mask);
  1821. }
  1822. /* Use reclaim/compaction for costly allocs or under memory pressure */
  1823. static bool in_reclaim_compaction(struct scan_control *sc)
  1824. {
  1825. if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
  1826. (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
  1827. sc->priority < DEF_PRIORITY - 2))
  1828. return true;
  1829. return false;
  1830. }
  1831. /*
  1832. * Reclaim/compaction is used for high-order allocation requests. It reclaims
  1833. * order-0 pages before compacting the zone. should_continue_reclaim() returns
  1834. * true if more pages should be reclaimed such that when the page allocator
  1835. * calls try_to_compact_zone() that it will have enough free pages to succeed.
  1836. * It will give up earlier than that if there is difficulty reclaiming pages.
  1837. */
  1838. static inline bool should_continue_reclaim(struct zone *zone,
  1839. unsigned long nr_reclaimed,
  1840. unsigned long nr_scanned,
  1841. struct scan_control *sc)
  1842. {
  1843. unsigned long pages_for_compaction;
  1844. unsigned long inactive_lru_pages;
  1845. /* If not in reclaim/compaction mode, stop */
  1846. if (!in_reclaim_compaction(sc))
  1847. return false;
  1848. /* Consider stopping depending on scan and reclaim activity */
  1849. if (sc->gfp_mask & __GFP_REPEAT) {
  1850. /*
  1851. * For __GFP_REPEAT allocations, stop reclaiming if the
  1852. * full LRU list has been scanned and we are still failing
  1853. * to reclaim pages. This full LRU scan is potentially
  1854. * expensive but a __GFP_REPEAT caller really wants to succeed
  1855. */
  1856. if (!nr_reclaimed && !nr_scanned)
  1857. return false;
  1858. } else {
  1859. /*
  1860. * For non-__GFP_REPEAT allocations which can presumably
  1861. * fail without consequence, stop if we failed to reclaim
  1862. * any pages from the last SWAP_CLUSTER_MAX number of
  1863. * pages that were scanned. This will return to the
1864. * caller faster at the risk that reclaim/compaction and
1865. * the resulting allocation attempt fail.
  1866. */
  1867. if (!nr_reclaimed)
  1868. return false;
  1869. }
  1870. /*
  1871. * If we have not reclaimed enough pages for compaction and the
  1872. * inactive lists are large enough, continue reclaiming
  1873. */
  1874. pages_for_compaction = (2UL << sc->order);
  1875. inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
  1876. if (get_nr_swap_pages() > 0)
  1877. inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
  1878. if (sc->nr_reclaimed < pages_for_compaction &&
  1879. inactive_lru_pages > pages_for_compaction)
  1880. return true;
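/*
 * Example: for an order-9 request (a 2MB THP with 4KB pages),
 * pages_for_compaction is 2UL << 9 = 1024 pages, so reclaim continues
 * while fewer than 1024 pages have been reclaimed and more than 1024
 * inactive pages remain on the relevant LRU lists.
 */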
  1881. /* If compaction would go ahead or the allocation would succeed, stop */
  1882. switch (compaction_suitable(zone, sc->order)) {
  1883. case COMPACT_PARTIAL:
  1884. case COMPACT_CONTINUE:
  1885. return false;
  1886. default:
  1887. return true;
  1888. }
  1889. }
  1890. static void shrink_zone(struct zone *zone, struct scan_control *sc)
  1891. {
  1892. unsigned long nr_reclaimed, nr_scanned;
  1893. do {
  1894. struct mem_cgroup *root = sc->target_mem_cgroup;
  1895. struct mem_cgroup_reclaim_cookie reclaim = {
  1896. .zone = zone,
  1897. .priority = sc->priority,
  1898. };
  1899. struct mem_cgroup *memcg;
  1900. nr_reclaimed = sc->nr_reclaimed;
  1901. nr_scanned = sc->nr_scanned;
  1902. memcg = mem_cgroup_iter(root, NULL, &reclaim);
  1903. do {
  1904. struct lruvec *lruvec;
  1905. lruvec = mem_cgroup_zone_lruvec(zone, memcg);
  1906. shrink_lruvec(lruvec, sc);
  1907. /*
  1908. * Direct reclaim and kswapd have to scan all memory
  1909. * cgroups to fulfill the overall scan target for the
  1910. * zone.
  1911. *
  1912. * Limit reclaim, on the other hand, only cares about
  1913. * nr_to_reclaim pages to be reclaimed and it will
  1914. * retry with decreasing priority if one round over the
  1915. * whole hierarchy is not sufficient.
  1916. */
  1917. if (!global_reclaim(sc) &&
  1918. sc->nr_reclaimed >= sc->nr_to_reclaim) {
  1919. mem_cgroup_iter_break(root, memcg);
  1920. break;
  1921. }
  1922. memcg = mem_cgroup_iter(root, memcg, &reclaim);
  1923. } while (memcg);
  1924. vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
  1925. sc->nr_scanned - nr_scanned,
  1926. sc->nr_reclaimed - nr_reclaimed);
  1927. } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
  1928. sc->nr_scanned - nr_scanned, sc));
  1929. }
  1930. /* Returns true if compaction should go ahead for a high-order request */
  1931. static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
  1932. {
  1933. unsigned long balance_gap, watermark;
  1934. bool watermark_ok;
  1935. /* Do not consider compaction for orders reclaim is meant to satisfy */
  1936. if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
  1937. return false;
  1938. /*
  1939. * Compaction takes time to run and there are potentially other
  1940. * callers using the pages just freed. Continue reclaiming until
  1941. * there is a buffer of free pages available to give compaction
  1942. * a reasonable chance of completing and allocating the page
  1943. */
  1944. balance_gap = min(low_wmark_pages(zone),
  1945. (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
  1946. KSWAPD_ZONE_BALANCE_GAP_RATIO);
  1947. watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
  1948. watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
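/*
 * Example: for an order-9 request the extra 2UL << order term is 1024
 * pages (~4MB with 4KB pages), so direct reclaim keeps going until the
 * zone is that far above its high watermark plus balance_gap; only then
 * does shrink_zones() skip the zone and leave the rest to compaction.
 */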
  1949. /*
  1950. * If compaction is deferred, reclaim up to a point where
  1951. * compaction will have a chance of success when re-enabled
  1952. */
  1953. if (compaction_deferred(zone, sc->order))
  1954. return watermark_ok;
  1955. /* If compaction is not ready to start, keep reclaiming */
  1956. if (!compaction_suitable(zone, sc->order))
  1957. return false;
  1958. return watermark_ok;
  1959. }
  1960. /*
  1961. * This is the direct reclaim path, for page-allocating processes. We only
  1962. * try to reclaim pages from zones which will satisfy the caller's allocation
  1963. * request.
  1964. *
  1965. * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
  1966. * Because:
  1967. * a) The caller may be trying to free *extra* pages to satisfy a higher-order
  1968. * allocation or
  1969. * b) The target zone may be at high_wmark_pages(zone) but the lower zones
  1970. * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
  1971. * zone defense algorithm.
  1972. *
  1973. * If a zone is deemed to be full of pinned pages then just give it a light
  1974. * scan then give up on it.
  1975. *
  1976. * This function returns true if a zone is being reclaimed for a costly
  1977. * high-order allocation and compaction is ready to begin. This indicates to
  1978. * the caller that it should consider retrying the allocation instead of
  1979. * further reclaim.
  1980. */
  1981. static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
  1982. {
  1983. struct zoneref *z;
  1984. struct zone *zone;
  1985. unsigned long nr_soft_reclaimed;
  1986. unsigned long nr_soft_scanned;
  1987. bool aborted_reclaim = false;
  1988. /*
  1989. * If the number of buffer_heads in the machine exceeds the maximum
  1990. * allowed level, force direct reclaim to scan the highmem zone as
  1991. * highmem pages could be pinning lowmem pages storing buffer_heads
  1992. */
  1993. if (buffer_heads_over_limit)
  1994. sc->gfp_mask |= __GFP_HIGHMEM;
  1995. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  1996. gfp_zone(sc->gfp_mask), sc->nodemask) {
  1997. if (!populated_zone(zone))
  1998. continue;
  1999. /*
2000. * Take care that memory controller reclaim has only a small influence
2001. * on the global LRU.
  2002. */
  2003. if (global_reclaim(sc)) {
  2004. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  2005. continue;
  2006. if (sc->priority != DEF_PRIORITY &&
  2007. !zone_reclaimable(zone))
  2008. continue; /* Let kswapd poll it */
  2009. if (IS_ENABLED(CONFIG_COMPACTION)) {
  2010. /*
  2011. * If we already have plenty of memory free for
  2012. * compaction in this zone, don't free any more.
  2013. * Even though compaction is invoked for any
  2014. * non-zero order, only frequent costly order
  2015. * reclamation is disruptive enough to become a
  2016. * noticeable problem, like transparent huge
  2017. * page allocations.
  2018. */
  2019. if (compaction_ready(zone, sc)) {
  2020. aborted_reclaim = true;
  2021. continue;
  2022. }
  2023. }
  2024. /*
  2025. * This steals pages from memory cgroups over softlimit
  2026. * and returns the number of reclaimed pages and
  2027. * scanned pages. This works for global memory pressure
  2028. * and balancing, not for a memcg's limit.
  2029. */
  2030. nr_soft_scanned = 0;
  2031. nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
  2032. sc->order, sc->gfp_mask,
  2033. &nr_soft_scanned);
  2034. sc->nr_reclaimed += nr_soft_reclaimed;
  2035. sc->nr_scanned += nr_soft_scanned;
2036. /* need some check to avoid more shrink_zone() calls */
  2037. }
  2038. shrink_zone(zone, sc);
  2039. }
  2040. return aborted_reclaim;
  2041. }
  2042. /* All zones in zonelist are unreclaimable? */
  2043. static bool all_unreclaimable(struct zonelist *zonelist,
  2044. struct scan_control *sc)
  2045. {
  2046. struct zoneref *z;
  2047. struct zone *zone;
  2048. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  2049. gfp_zone(sc->gfp_mask), sc->nodemask) {
  2050. if (!populated_zone(zone))
  2051. continue;
  2052. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  2053. continue;
  2054. if (zone_reclaimable(zone))
  2055. return false;
  2056. }
  2057. return true;
  2058. }
  2059. /*
  2060. * This is the main entry point to direct page reclaim.
  2061. *
  2062. * If a full scan of the inactive list fails to free enough memory then we
  2063. * are "out of memory" and something needs to be killed.
  2064. *
  2065. * If the caller is !__GFP_FS then the probability of a failure is reasonably
  2066. * high - the zone may be full of dirty or under-writeback pages, which this
  2067. * caller can't do much about. We kick the writeback threads and take explicit
  2068. * naps in the hope that some of these pages can be written. But if the
  2069. * allocating task holds filesystem locks which prevent writeout this might not
  2070. * work, and the allocation attempt will fail.
  2071. *
  2072. * returns: 0, if no pages reclaimed
  2073. * else, the number of pages reclaimed
  2074. */
  2075. static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
  2076. struct scan_control *sc,
  2077. struct shrink_control *shrink)
  2078. {
  2079. unsigned long total_scanned = 0;
  2080. struct reclaim_state *reclaim_state = current->reclaim_state;
  2081. struct zoneref *z;
  2082. struct zone *zone;
  2083. unsigned long writeback_threshold;
  2084. bool aborted_reclaim;
  2085. delayacct_freepages_start();
  2086. if (global_reclaim(sc))
  2087. count_vm_event(ALLOCSTALL);
  2088. do {
  2089. vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
  2090. sc->priority);
  2091. sc->nr_scanned = 0;
  2092. aborted_reclaim = shrink_zones(zonelist, sc);
  2093. /*
  2094. * Don't shrink slabs when reclaiming memory from over limit
  2095. * cgroups but do shrink slab at least once when aborting
  2096. * reclaim for compaction to avoid unevenly scanning file/anon
  2097. * LRU pages over slab pages.
  2098. */
  2099. if (global_reclaim(sc)) {
  2100. unsigned long lru_pages = 0;
  2101. nodes_clear(shrink->nodes_to_scan);
  2102. for_each_zone_zonelist(zone, z, zonelist,
  2103. gfp_zone(sc->gfp_mask)) {
  2104. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  2105. continue;
  2106. lru_pages += zone_reclaimable_pages(zone);
  2107. node_set(zone_to_nid(zone),
  2108. shrink->nodes_to_scan);
  2109. }
  2110. shrink_slab(shrink, sc->nr_scanned, lru_pages);
  2111. if (reclaim_state) {
  2112. sc->nr_reclaimed += reclaim_state->reclaimed_slab;
  2113. reclaim_state->reclaimed_slab = 0;
  2114. }
  2115. }
  2116. total_scanned += sc->nr_scanned;
  2117. if (sc->nr_reclaimed >= sc->nr_to_reclaim)
  2118. goto out;
  2119. /*
2120. * If we're having trouble reclaiming, start doing
  2121. * writepage even in laptop mode.
  2122. */
  2123. if (sc->priority < DEF_PRIORITY - 2)
  2124. sc->may_writepage = 1;
  2125. /*
  2126. * Try to write back as many pages as we just scanned. This
  2127. * tends to cause slow streaming writers to write data to the
  2128. * disk smoothly, at the dirtying rate, which is nice. But
  2129. * that's undesirable in laptop mode, where we *want* lumpy
  2130. * writeout. So in laptop mode, write out the whole world.
  2131. */
  2132. writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
  2133. if (total_scanned > writeback_threshold) {
  2134. wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
  2135. WB_REASON_TRY_TO_FREE_PAGES);
  2136. sc->may_writepage = 1;
  2137. }
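/*
 * Example: direct reclaim uses nr_to_reclaim = SWAP_CLUSTER_MAX (32), so
 * writeback_threshold is 48; once 48 pages have been scanned in total the
 * flusher threads are kicked and writepage is allowed even in laptop mode.
 */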
  2138. } while (--sc->priority >= 0 && !aborted_reclaim);
  2139. out:
  2140. delayacct_freepages_end();
  2141. if (sc->nr_reclaimed)
  2142. return sc->nr_reclaimed;
  2143. /*
2144. * While hibernation is in progress, kswapd is frozen, so it can't mark
2145. * the zone as all_unreclaimable. Thus we bypass the all_unreclaimable
2146. * check.
  2147. */
  2148. if (oom_killer_disabled)
  2149. return 0;
  2150. /* Aborted reclaim to try compaction? don't OOM, then */
  2151. if (aborted_reclaim)
  2152. return 1;
  2153. /* top priority shrink_zones still had more to do? don't OOM, then */
  2154. if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
  2155. return 1;
  2156. return 0;
  2157. }
  2158. static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
  2159. {
  2160. struct zone *zone;
  2161. unsigned long pfmemalloc_reserve = 0;
  2162. unsigned long free_pages = 0;
  2163. int i;
  2164. bool wmark_ok;
  2165. for (i = 0; i <= ZONE_NORMAL; i++) {
  2166. zone = &pgdat->node_zones[i];
  2167. pfmemalloc_reserve += min_wmark_pages(zone);
  2168. free_pages += zone_page_state(zone, NR_FREE_PAGES);
  2169. }
  2170. wmark_ok = free_pages > pfmemalloc_reserve / 2;
  2171. /* kswapd must be awake if processes are being throttled */
  2172. if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
  2173. pgdat->classzone_idx = min(pgdat->classzone_idx,
  2174. (enum zone_type)ZONE_NORMAL);
  2175. wake_up_interruptible(&pgdat->kswapd_wait);
  2176. }
  2177. return wmark_ok;
  2178. }
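/*
 * Example with hypothetical watermarks: if the min watermarks of the
 * ZONE_DMA..ZONE_NORMAL zones sum to 20000 pages, direct reclaimers start
 * being throttled once the free pages in those zones drop to 10000 or
 * fewer (free_pages must exceed half the pfmemalloc reserve), and kswapd
 * is woken with classzone_idx capped at ZONE_NORMAL.
 */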
  2179. /*
  2180. * Throttle direct reclaimers if backing storage is backed by the network
  2181. * and the PFMEMALLOC reserve for the preferred node is getting dangerously
  2182. * depleted. kswapd will continue to make progress and wake the processes
  2183. * when the low watermark is reached.
  2184. *
  2185. * Returns true if a fatal signal was delivered during throttling. If this
  2186. * happens, the page allocator should not consider triggering the OOM killer.
  2187. */
  2188. static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
  2189. nodemask_t *nodemask)
  2190. {
  2191. struct zone *zone;
  2192. int high_zoneidx = gfp_zone(gfp_mask);
  2193. pg_data_t *pgdat;
  2194. /*
  2195. * Kernel threads should not be throttled as they may be indirectly
  2196. * responsible for cleaning pages necessary for reclaim to make forward
  2197. * progress. kjournald for example may enter direct reclaim while
2198. * committing a transaction, where throttling it could force other
  2199. * processes to block on log_wait_commit().
  2200. */
  2201. if (current->flags & PF_KTHREAD)
  2202. goto out;
  2203. /*
  2204. * If a fatal signal is pending, this process should not throttle.
  2205. * It should return quickly so it can exit and free its memory
  2206. */
  2207. if (fatal_signal_pending(current))
  2208. goto out;
  2209. /* Check if the pfmemalloc reserves are ok */
  2210. first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
  2211. pgdat = zone->zone_pgdat;
  2212. if (pfmemalloc_watermark_ok(pgdat))
  2213. goto out;
  2214. /* Account for the throttling */
  2215. count_vm_event(PGSCAN_DIRECT_THROTTLE);
  2216. /*
  2217. * If the caller cannot enter the filesystem, it's possible that it
  2218. * is due to the caller holding an FS lock or performing a journal
  2219. * transaction in the case of a filesystem like ext[3|4]. In this case,
  2220. * it is not safe to block on pfmemalloc_wait as kswapd could be
  2221. * blocked waiting on the same lock. Instead, throttle for up to a
  2222. * second before continuing.
  2223. */
  2224. if (!(gfp_mask & __GFP_FS)) {
  2225. wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
  2226. pfmemalloc_watermark_ok(pgdat), HZ);
  2227. goto check_pending;
  2228. }
  2229. /* Throttle until kswapd wakes the process */
  2230. wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
  2231. pfmemalloc_watermark_ok(pgdat));
  2232. check_pending:
  2233. if (fatal_signal_pending(current))
  2234. return true;
  2235. out:
  2236. return false;
  2237. }
  2238. unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
  2239. gfp_t gfp_mask, nodemask_t *nodemask)
  2240. {
  2241. unsigned long nr_reclaimed;
  2242. struct scan_control sc = {
  2243. .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
  2244. .may_writepage = !laptop_mode,
  2245. .nr_to_reclaim = SWAP_CLUSTER_MAX,
  2246. .may_unmap = 1,
  2247. .may_swap = 1,
  2248. .order = order,
  2249. .priority = DEF_PRIORITY,
  2250. .target_mem_cgroup = NULL,
  2251. .nodemask = nodemask,
  2252. };
  2253. struct shrink_control shrink = {
  2254. .gfp_mask = sc.gfp_mask,
  2255. };
  2256. /*
  2257. * Do not enter reclaim if fatal signal was delivered while throttled.
  2258. * 1 is returned so that the page allocator does not OOM kill at this
  2259. * point.
  2260. */
  2261. if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
  2262. return 1;
  2263. trace_mm_vmscan_direct_reclaim_begin(order,
  2264. sc.may_writepage,
  2265. gfp_mask);
  2266. nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
  2267. trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
  2268. return nr_reclaimed;
  2269. }
  2270. #ifdef CONFIG_MEMCG
  2271. unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
  2272. gfp_t gfp_mask, bool noswap,
  2273. struct zone *zone,
  2274. unsigned long *nr_scanned)
  2275. {
  2276. struct scan_control sc = {
  2277. .nr_scanned = 0,
  2278. .nr_to_reclaim = SWAP_CLUSTER_MAX,
  2279. .may_writepage = !laptop_mode,
  2280. .may_unmap = 1,
  2281. .may_swap = !noswap,
  2282. .order = 0,
  2283. .priority = 0,
  2284. .target_mem_cgroup = memcg,
  2285. };
  2286. struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
  2287. sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
  2288. (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
  2289. trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
  2290. sc.may_writepage,
  2291. sc.gfp_mask);
  2292. /*
  2293. * NOTE: Although we can get the priority field, using it
  2294. * here is not a good idea, since it limits the pages we can scan.
2295. * If we don't reclaim here, the shrink_zone from balance_pgdat
2296. * will pick up pages from other mem cgroups as well. We hack
  2297. * the priority and make it zero.
  2298. */
  2299. shrink_lruvec(lruvec, &sc);
  2300. trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
  2301. *nr_scanned = sc.nr_scanned;
  2302. return sc.nr_reclaimed;
  2303. }
  2304. unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
  2305. gfp_t gfp_mask,
  2306. bool noswap)
  2307. {
  2308. struct zonelist *zonelist;
  2309. unsigned long nr_reclaimed;
  2310. int nid;
  2311. struct scan_control sc = {
  2312. .may_writepage = !laptop_mode,
  2313. .may_unmap = 1,
  2314. .may_swap = !noswap,
  2315. .nr_to_reclaim = SWAP_CLUSTER_MAX,
  2316. .order = 0,
  2317. .priority = DEF_PRIORITY,
  2318. .target_mem_cgroup = memcg,
2319. .nodemask = NULL, /* we don't care about placement */
  2320. .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
  2321. (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
  2322. };
  2323. struct shrink_control shrink = {
  2324. .gfp_mask = sc.gfp_mask,
  2325. };
  2326. /*
  2327. * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2328. * care where we get pages from. So the node where we start the
  2329. * scan does not need to be the current node.
  2330. */
  2331. nid = mem_cgroup_select_victim_node(memcg);
  2332. zonelist = NODE_DATA(nid)->node_zonelists;
  2333. trace_mm_vmscan_memcg_reclaim_begin(0,
  2334. sc.may_writepage,
  2335. sc.gfp_mask);
  2336. nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
  2337. trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
  2338. return nr_reclaimed;
  2339. }
  2340. #endif
  2341. static void age_active_anon(struct zone *zone, struct scan_control *sc)
  2342. {
  2343. struct mem_cgroup *memcg;
  2344. if (!total_swap_pages)
  2345. return;
  2346. memcg = mem_cgroup_iter(NULL, NULL, NULL);
  2347. do {
  2348. struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
  2349. if (inactive_anon_is_low(lruvec))
  2350. shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
  2351. sc, LRU_ACTIVE_ANON);
  2352. memcg = mem_cgroup_iter(NULL, memcg, NULL);
  2353. } while (memcg);
  2354. }
  2355. static bool zone_balanced(struct zone *zone, int order,
  2356. unsigned long balance_gap, int classzone_idx)
  2357. {
  2358. if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
  2359. balance_gap, classzone_idx, 0))
  2360. return false;
  2361. if (IS_ENABLED(CONFIG_COMPACTION) && order &&
  2362. !compaction_suitable(zone, order))
  2363. return false;
  2364. return true;
  2365. }
  2366. /*
  2367. * pgdat_balanced() is used when checking if a node is balanced.
  2368. *
  2369. * For order-0, all zones must be balanced!
  2370. *
  2371. * For high-order allocations only zones that meet watermarks and are in a
  2372. * zone allowed by the callers classzone_idx are added to balanced_pages. The
  2373. * total of balanced pages must be at least 25% of the zones allowed by
  2374. * classzone_idx for the node to be considered balanced. Forcing all zones to
  2375. * be balanced for high orders can cause excessive reclaim when there are
  2376. * imbalanced zones.
  2377. * The choice of 25% is due to
  2378. * o a 16M DMA zone that is balanced will not balance a zone on any
  2379. * reasonable sized machine
  2380. * o On all other machines, the top zone must be at least a reasonable
  2381. * percentage of the middle zones. For example, on 32-bit x86, highmem
2382. * would need to be at least 256M for it to balance a whole node.
  2383. * Similarly, on x86-64 the Normal zone would need to be at least 1G
  2384. * to balance a node on its own. These seemed like reasonable ratios.
  2385. */
  2386. static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
  2387. {
  2388. unsigned long managed_pages = 0;
  2389. unsigned long balanced_pages = 0;
  2390. int i;
  2391. /* Check the watermark levels */
  2392. for (i = 0; i <= classzone_idx; i++) {
  2393. struct zone *zone = pgdat->node_zones + i;
  2394. if (!populated_zone(zone))
  2395. continue;
  2396. managed_pages += zone->managed_pages;
  2397. /*
  2398. * A special case here:
  2399. *
  2400. * balance_pgdat() skips over all_unreclaimable after
  2401. * DEF_PRIORITY. Effectively, it considers them balanced so
  2402. * they must be considered balanced here as well!
  2403. */
  2404. if (!zone_reclaimable(zone)) {
  2405. balanced_pages += zone->managed_pages;
  2406. continue;
  2407. }
  2408. if (zone_balanced(zone, order, 0, i))
  2409. balanced_pages += zone->managed_pages;
  2410. else if (!order)
  2411. return false;
  2412. }
  2413. if (order)
  2414. return balanced_pages >= (managed_pages >> 2);
  2415. else
  2416. return true;
  2417. }
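/*
 * Example: a node with 4000000 managed pages in the zones allowed by
 * classzone_idx counts as balanced for a high-order wakeup once the zones
 * that pass zone_balanced() (or are unreclaimable) together hold at least
 * 1000000 managed pages (managed_pages >> 2). For order-0, a single
 * populated zone failing zone_balanced() makes the whole node unbalanced.
 */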
  2418. /*
  2419. * Prepare kswapd for sleeping. This verifies that there are no processes
  2420. * waiting in throttle_direct_reclaim() and that watermarks have been met.
  2421. *
  2422. * Returns true if kswapd is ready to sleep
  2423. */
  2424. static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
  2425. int classzone_idx)
  2426. {
  2427. /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
  2428. if (remaining)
  2429. return false;
  2430. /*
  2431. * There is a potential race between when kswapd checks its watermarks
  2432. * and a process gets throttled. There is also a potential race if
2433. * processes get throttled, kswapd wakes, and a large process exits, thereby
2434. * balancing the zones, which causes kswapd to miss a wakeup. If kswapd
  2435. * is going to sleep, no process should be sleeping on pfmemalloc_wait
2436. * so wake them now if necessary. If need be, processes will wake
2437. * kswapd and get throttled again.
  2438. */
  2439. if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
  2440. wake_up(&pgdat->pfmemalloc_wait);
  2441. return false;
  2442. }
  2443. return pgdat_balanced(pgdat, order, classzone_idx);
  2444. }
  2445. /*
  2446. * kswapd shrinks the zone by the number of pages required to reach
  2447. * the high watermark.
  2448. *
  2449. * Returns true if kswapd scanned at least the requested number of pages to
  2450. * reclaim or if the lack of progress was due to pages under writeback.
  2451. * This is used to determine if the scanning priority needs to be raised.
  2452. */
  2453. static bool kswapd_shrink_zone(struct zone *zone,
  2454. int classzone_idx,
  2455. struct scan_control *sc,
  2456. unsigned long lru_pages,
  2457. unsigned long *nr_attempted)
  2458. {
  2459. int testorder = sc->order;
  2460. unsigned long balance_gap;
  2461. struct reclaim_state *reclaim_state = current->reclaim_state;
  2462. struct shrink_control shrink = {
  2463. .gfp_mask = sc->gfp_mask,
  2464. };
  2465. bool lowmem_pressure;
  2466. /* Reclaim above the high watermark. */
  2467. sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
  2468. /*
  2469. * Kswapd reclaims only single pages with compaction enabled. Trying
  2470. * too hard to reclaim until contiguous free pages have become
  2471. * available can hurt performance by evicting too much useful data
  2472. * from memory. Do not reclaim more than needed for compaction.
  2473. */
  2474. if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
  2475. compaction_suitable(zone, sc->order) !=
  2476. COMPACT_SKIPPED)
  2477. testorder = 0;
  2478. /*
  2479. * We put equal pressure on every zone, unless one zone has way too
  2480. * many pages free already. The "too many pages" is defined as the
  2481. * high wmark plus a "gap" where the gap is either the low
  2482. * watermark or 1% of the zone, whichever is smaller.
  2483. */
  2484. balance_gap = min(low_wmark_pages(zone),
  2485. (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
  2486. KSWAPD_ZONE_BALANCE_GAP_RATIO);
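/*
 * Example, assuming KSWAPD_ZONE_BALANCE_GAP_RATIO is 100: a zone with
 * 1000000 managed pages has a 1% gap of 10000 pages; if its low watermark
 * is 5000 pages, balance_gap = min(5000, 10000) = 5000, so the zone is
 * only treated as balanced once it is 5000 pages above its high watermark.
 */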
  2487. /*
  2488. * If there is no low memory pressure or the zone is balanced then no
  2489. * reclaim is necessary
  2490. */
  2491. lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
  2492. if (!lowmem_pressure && zone_balanced(zone, testorder,
  2493. balance_gap, classzone_idx))
  2494. return true;
  2495. shrink_zone(zone, sc);
  2496. nodes_clear(shrink.nodes_to_scan);
  2497. node_set(zone_to_nid(zone), shrink.nodes_to_scan);
  2498. reclaim_state->reclaimed_slab = 0;
  2499. shrink_slab(&shrink, sc->nr_scanned, lru_pages);
  2500. sc->nr_reclaimed += reclaim_state->reclaimed_slab;
  2501. /* Account for the number of pages attempted to reclaim */
  2502. *nr_attempted += sc->nr_to_reclaim;
  2503. zone_clear_flag(zone, ZONE_WRITEBACK);
  2504. /*
  2505. * If a zone reaches its high watermark, consider it to be no longer
  2506. * congested. It's possible there are dirty pages backed by congested
  2507. * BDIs but as pressure is relieved, speculatively avoid congestion
  2508. * waits.
  2509. */
  2510. if (zone_reclaimable(zone) &&
  2511. zone_balanced(zone, testorder, 0, classzone_idx)) {
  2512. zone_clear_flag(zone, ZONE_CONGESTED);
  2513. zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
  2514. }
  2515. return sc->nr_scanned >= sc->nr_to_reclaim;
  2516. }
  2517. /*
  2518. * For kswapd, balance_pgdat() will work across all this node's zones until
  2519. * they are all at high_wmark_pages(zone).
  2520. *
  2521. * Returns the final order kswapd was reclaiming at
  2522. *
  2523. * There is special handling here for zones which are full of pinned pages.
  2524. * This can happen if the pages are all mlocked, or if they are all used by
  2525. * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
  2526. * What we do is to detect the case where all pages in the zone have been
  2527. * scanned twice and there has been zero successful reclaim. Mark the zone as
  2528. * dead and from now on, only perform a short scan. Basically we're polling
  2529. * the zone for when the problem goes away.
  2530. *
  2531. * kswapd scans the zones in the highmem->normal->dma direction. It skips
  2532. * zones which have free_pages > high_wmark_pages(zone), but once a zone is
  2533. * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
  2534. * lower zones regardless of the number of free pages in the lower zones. This
  2535. * interoperates with the page allocator fallback scheme to ensure that aging
  2536. * of pages is balanced across the zones.
  2537. */
static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
							int *classzone_idx)
{
	int i;
	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.priority = DEF_PRIORITY,
		.may_unmap = 1,
		.may_swap = 1,
		.may_writepage = !laptop_mode,
		.order = order,
		.target_mem_cgroup = NULL,
	};
	count_vm_event(PAGEOUTRUN);

	do {
		unsigned long lru_pages = 0;
		unsigned long nr_attempted = 0;
		bool raise_priority = true;
		bool pgdat_needs_compaction = (order > 0);

		sc.nr_reclaimed = 0;

		/*
		 * Scan in the highmem->dma direction for the highest
		 * zone which needs scanning
		 */
		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			if (sc.priority != DEF_PRIORITY &&
			    !zone_reclaimable(zone))
				continue;

			/*
			 * Do some background aging of the anon list, to give
			 * pages a chance to be referenced before reclaiming.
			 */
			age_active_anon(zone, &sc);

			/*
			 * If the number of buffer_heads in the machine
			 * exceeds the maximum allowed level and this node
			 * has a highmem zone, force kswapd to reclaim from
			 * it to relieve lowmem pressure.
			 */
			if (buffer_heads_over_limit && is_highmem_idx(i)) {
				end_zone = i;
				break;
			}

			if (!zone_balanced(zone, order, 0, 0)) {
				end_zone = i;
				break;
			} else {
				/*
				 * If balanced, clear the dirty and congested
				 * flags
				 */
				zone_clear_flag(zone, ZONE_CONGESTED);
				zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
			}
		}

		if (i < 0)
			goto out;

		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			lru_pages += zone_reclaimable_pages(zone);

			/*
			 * If any zone is currently balanced then kswapd will
			 * not call compaction as it is expected that the
			 * necessary pages are already available.
			 */
			if (pgdat_needs_compaction &&
			    zone_watermark_ok(zone, order,
					      low_wmark_pages(zone),
					      *classzone_idx, 0))
				pgdat_needs_compaction = false;
		}

		/*
		 * If we're getting trouble reclaiming, start doing writepage
		 * even in laptop mode.
		 */
		if (sc.priority < DEF_PRIORITY - 2)
			sc.may_writepage = 1;

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction. This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			if (sc.priority != DEF_PRIORITY &&
			    !zone_reclaimable(zone))
				continue;

			sc.nr_scanned = 0;

			nr_soft_scanned = 0;
			/*
			 * Call soft limit reclaim before calling shrink_zone.
			 */
			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
							order, sc.gfp_mask,
							&nr_soft_scanned);
			sc.nr_reclaimed += nr_soft_reclaimed;

			/*
			 * There should be no need to raise the scanning
			 * priority if enough pages are already being scanned
			 * that the high watermark would be met at 100%
			 * efficiency.
			 */
			if (kswapd_shrink_zone(zone, end_zone, &sc,
					       lru_pages, &nr_attempted))
				raise_priority = false;
		}

		/*
		 * If the low watermark is met there is no need for processes
		 * to be throttled on pfmemalloc_wait as they should now be
		 * able to safely make forward progress. Wake them.
		 */
		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
				pfmemalloc_watermark_ok(pgdat))
			wake_up(&pgdat->pfmemalloc_wait);

		/*
		 * Fragmentation may mean that the system cannot be rebalanced
		 * for high-order allocations in all zones. If twice the
		 * allocation size has been reclaimed and the zones are still
		 * not balanced then recheck the watermarks at order-0 to
		 * prevent kswapd reclaiming excessively. Assume that a
		 * process that requested a high-order allocation can perform
		 * direct reclaim/compaction itself.
		 */
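		/*
		 * Editorial arithmetic example (not in the original source):
		 * for an order-3 request the back-off below triggers once at
		 * least 2UL << 3 = 16 pages have been reclaimed during this
		 * balance_pgdat() run.
		 */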
		if (order && sc.nr_reclaimed >= 2UL << order)
			order = sc.order = 0;

		/* Check if kswapd should be suspending */
		if (try_to_freeze() || kthread_should_stop())
			break;

		/*
		 * Compact if necessary and kswapd is reclaiming at least the
		 * high watermark number of pages as requested
		 */
		if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
			compact_pgdat(pgdat, order);

		/*
		 * Raise priority if scanning rate is too low or there was no
		 * progress in reclaiming pages
		 */
		if (raise_priority || !sc.nr_reclaimed)
			sc.priority--;
	} while (sc.priority >= 1 &&
		 !pgdat_balanced(pgdat, order, *classzone_idx));

out:
	/*
	 * Return the order we were reclaiming at so prepare_kswapd_sleep()
	 * makes a decision on the order we were last reclaiming at. However,
	 * if another caller entered the allocator slow path while kswapd
	 * was awake, order will remain at the higher level.
	 */
	*classzone_idx = end_zone;
	return order;
}

static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
{
	long remaining = 0;
	DEFINE_WAIT(wait);

	if (freezing(current) || kthread_should_stop())
		return;

	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);

	/* Try to sleep for a short interval */
	if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
		remaining = schedule_timeout(HZ/10);
		finish_wait(&pgdat->kswapd_wait, &wait);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
	}

	/*
	 * After a short sleep, check if it was a premature sleep. If not, then
	 * go fully to sleep until explicitly woken up.
	 */
	if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);

		/*
		 * vmstat counters are not perfectly accurate and the estimated
		 * value for counters such as NR_FREE_PAGES can deviate from the
		 * true value by nr_online_cpus * threshold. To avoid the zone
		 * watermarks being breached while under pressure, we reduce the
		 * per-cpu vmstat threshold while kswapd is awake and restore
		 * them before going back to sleep.
		 */
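		/*
		 * Editorial example (assumed figures, not from the original
		 * source): with 8 online CPUs and a per-cpu stat threshold of
		 * 125, NR_FREE_PAGES as read from the vmstat counters can be
		 * off by up to 8 * 125 = 1000 pages.
		 */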
		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);

		/*
		 * Compaction records what page blocks it recently failed to
		 * isolate pages from and skips them in the future scanning.
		 * When kswapd is going to sleep, it is reasonable to assume
		 * that pages and compaction may succeed so reset the cache.
		 */
		reset_isolation_suitable(pgdat);

		if (!kthread_should_stop())
			schedule();

		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
	} else {
		if (remaining)
			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
		else
			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
	}
	finish_wait(&pgdat->kswapd_wait, &wait);
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned long order, new_order;
	unsigned balanced_order;
	int classzone_idx, new_classzone_idx;
	int balanced_classzone_idx;
	pg_data_t *pgdat = (pg_data_t *)p;
	struct task_struct *tsk = current;

	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	lockdep_set_current_reclaim_state(GFP_KERNEL);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	order = new_order = 0;
	balanced_order = 0;
	classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
	balanced_classzone_idx = classzone_idx;
	for ( ; ; ) {
		bool ret;

		/*
		 * If the last balance_pgdat was unsuccessful it's unlikely a
		 * new request of a similar or harder type will succeed soon,
		 * so consider going to sleep on the basis of the order and
		 * classzone we last reclaimed at.
		 */
		if (balanced_classzone_idx >= new_classzone_idx &&
					balanced_order == new_order) {
			new_order = pgdat->kswapd_max_order;
			new_classzone_idx = pgdat->classzone_idx;
			pgdat->kswapd_max_order = 0;
			pgdat->classzone_idx = pgdat->nr_zones - 1;
		}

		if (order < new_order || classzone_idx > new_classzone_idx) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation or has tighter zone constraints
			 */
			order = new_order;
			classzone_idx = new_classzone_idx;
		} else {
			kswapd_try_to_sleep(pgdat, balanced_order,
						balanced_classzone_idx);
			order = pgdat->kswapd_max_order;
			classzone_idx = pgdat->classzone_idx;
			new_order = order;
			new_classzone_idx = classzone_idx;
			pgdat->kswapd_max_order = 0;
			pgdat->classzone_idx = pgdat->nr_zones - 1;
		}

		ret = try_to_freeze();
		if (kthread_should_stop())
			break;

		/*
		 * We can speed up thawing tasks if we don't call balance_pgdat
		 * after returning from the refrigerator
		 */
		if (!ret) {
			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
			balanced_classzone_idx = classzone_idx;
			balanced_order = balance_pgdat(pgdat, order,
						&balanced_classzone_idx);
		}
	}

	current->reclaim_state = NULL;
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
		return;
	pgdat = zone->zone_pgdat;
	if (pgdat->kswapd_max_order < order) {
		pgdat->kswapd_max_order = order;
		pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
	}
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	if (zone_balanced(zone, order, 0, 0))
		return;

	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
	wake_up_interruptible(&pgdat->kswapd_wait);
}
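/*
 * Editorial sketch (not part of vmscan.c): an allocator slow path would
 * typically wake kswapd on every zone in its zonelist before falling back to
 * direct reclaim. The helper name and its exact shape below are illustrative
 * assumptions, not the allocator's actual code.
 *
 *	static void wake_kswapds_example(struct zonelist *zonelist,
 *					 enum zone_type high_zoneidx,
 *					 struct zone *preferred_zone, int order)
 *	{
 *		struct zoneref *z;
 *		struct zone *zone;
 *
 *		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
 *			wakeup_kswapd(zone, order, zone_idx(preferred_zone));
 *	}
 */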
/*
 * The reclaimable count should be mostly accurate.
 * The less reclaimable pages may be
 * - mlocked pages, which will be moved to the unevictable list when encountered
 * - mapped pages, which may require several travels to be reclaimed
 * - dirty pages, which are not "instantly" reclaimable
 */
unsigned long global_reclaimable_pages(void)
{
	int nr;

	nr = global_page_state(NR_ACTIVE_FILE) +
	     global_page_state(NR_INACTIVE_FILE);

	if (get_nr_swap_pages() > 0)
		nr += global_page_state(NR_ACTIVE_ANON) +
		      global_page_state(NR_INACTIVE_ANON);

	return nr;
}

#ifdef CONFIG_HIBERNATION
/*
 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
 * freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_HIGHUSER_MOVABLE,
		.may_swap = 1,
		.may_unmap = 1,
		.may_writepage = 1,
		.nr_to_reclaim = nr_to_reclaim,
		.hibernation_mode = 1,
		.order = 0,
		.priority = DEF_PRIORITY,
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
	struct task_struct *p = current;
	unsigned long nr_reclaimed;

	p->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(sc.gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);

	p->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	p->flags &= ~PF_MEMALLOC;

	return nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */

/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness. So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			const struct cpumask *mask;

			mask = cpumask_of_node(pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}

/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state == SYSTEM_BOOTING);
		pr_err("Failed to start kswapd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kswapd);
		pgdat->kswapd = NULL;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined. Caller must
 * hold lock_memory_hotplug().
 */
void kswapd_stop(int nid)
{
	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;

	if (kswapd) {
		kthread_stop(kswapd);
		NODE_DATA(nid)->kswapd = NULL;
	}
}

static int __init kswapd_init(void)
{
	int nid;

	swap_setup();
	for_each_node_state(nid, N_MEMORY)
		kswapd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4
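/*
 * Editorial arithmetic example (not in the original source): shrink_zone()
 * considers roughly lru_size >> priority pages per pass, so a priority of 4
 * means about 1/(1 << 4) = 1/16th of the zone's LRU pages are scanned per
 * zone_reclaim attempt.
 */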
/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
		zone_page_state(zone, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static long zone_pagecache_reclaimable(struct zone *zone)
{
	long nr_pagecache_reclaimable;
	long delta = 0;

	/*
	 * If RECLAIM_SWAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache and zone_unmapped_file_pages() provides
	 * a better estimate
	 */
	if (zone_reclaim_mode & RECLAIM_SWAP)
		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(zone_reclaim_mode & RECLAIM_WRITE))
		delta += zone_page_state(zone, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}
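/*
 * Editorial worked example (assumed figures, not from the original source):
 * with RECLAIM_SWAP and RECLAIM_WRITE both clear, a zone with 1000 unmapped
 * file LRU pages of which 200 are dirty is reported as having roughly
 * 1000 - 200 = 800 reclaimable page cache pages.
 */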
/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.may_swap = 1,
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
		.order = order,
		.priority = ZONE_RECLAIM_PRIORITY,
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};
	unsigned long nr_slab_pages0, nr_slab_pages1;

	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink_zone with increasing
		 * priorities until we have enough memory freed.
		 */
		do {
			shrink_zone(zone, &sc);
		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
	}

	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	if (nr_slab_pages0 > zone->min_slab_pages) {
		/*
		 * shrink_slab() does not currently allow us to determine how
		 * many pages were freed in this zone. So we take the current
		 * number of slab pages and shake the slab until it is reduced
		 * by the same nr_pages that we used for reclaiming unmapped
		 * pages.
		 */
		nodes_clear(shrink.nodes_to_scan);
		node_set(zone_to_nid(zone), shrink.nodes_to_scan);
		for (;;) {
			unsigned long lru_pages = zone_reclaimable_pages(zone);

			/* No reclaimable slab or very low memory pressure */
			if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
				break;

			/* Freed enough memory */
			nr_slab_pages1 = zone_page_state(zone,
							NR_SLAB_RECLAIMABLE);
			if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
				break;
		}

		/*
		 * Update nr_reclaimed by the number of slab pages we
		 * reclaimed from this zone.
		 */
		nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
		if (nr_slab_pages1 < nr_slab_pages0)
			sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
	lockdep_clear_current_reclaim_state();
	return sc.nr_reclaimed >= nr_pages;
}

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int node_id;
	int ret;

	/*
	 * Zone reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the zone is overallocated. So we do not reclaim
	 * if less than a specified percentage of the zone is used by
	 * unmapped file backed pages.
	 */
	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
		return ZONE_RECLAIM_FULL;

	if (!zone_reclaimable(zone))
		return ZONE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
		return ZONE_RECLAIM_NOSCAN;

	/*
	 * Only run zone reclaim on the local zone or on zones that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	node_id = zone_to_nid(zone);
	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
		return ZONE_RECLAIM_NOSCAN;

	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return ZONE_RECLAIM_NOSCAN;

	ret = __zone_reclaim(zone, gfp_mask, order);
	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
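/*
 * Editorial sketch (not part of vmscan.c): an allocator-side caller would
 * typically retry the watermark check only when zone_reclaim() reports that
 * it actually ran. The helper name below is an illustrative assumption.
 *
 *	static bool zone_reclaim_helped(struct zone *zone, gfp_t gfp_mask,
 *					unsigned int order,
 *					unsigned long mark, int classzone_idx)
 *	{
 *		int ret = zone_reclaim(zone, gfp_mask, order);
 *
 *		if (ret == ZONE_RECLAIM_NOSCAN || ret == ZONE_RECLAIM_FULL)
 *			return false;
 *		return zone_watermark_ok(zone, order, mark, classzone_idx, 0);
 *	}
 */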
#endif

/*
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
int page_evictable(struct page *page)
{
	return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
}

#ifdef CONFIG_SHMEM
/**
 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
 * @pages:	array of pages to check
 * @nr_pages:	number of pages to check
 *
 * Checks pages for evictability and moves them to the appropriate lru list.
 *
 * This function is only used for SysV IPC SHM_UNLOCK.
 */
void check_move_unevictable_pages(struct page **pages, int nr_pages)
{
	struct lruvec *lruvec;
	struct zone *zone = NULL;
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pages[i];
		struct zone *pagezone;

		pgscanned++;
		pagezone = page_zone(page);
		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		lruvec = mem_cgroup_page_lruvec(page, zone);

		if (!PageLRU(page) || !PageUnevictable(page))
			continue;

		if (page_evictable(page)) {
			enum lru_list lru = page_lru_base_type(page);

			VM_BUG_ON(PageActive(page));
			ClearPageUnevictable(page);
			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
			add_page_to_lru_list(page, lruvec, lru);
			pgrescued++;
		}
	}

	if (zone) {
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		spin_unlock_irq(&zone->lru_lock);
	}
}
#endif /* CONFIG_SHMEM */
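/*
 * Editorial sketch (not part of vmscan.c): the SHM_UNLOCK path rescans a
 * mapping in small batches and feeds each batch to
 * check_move_unevictable_pages(). The helper below is illustrative only and
 * assumes the caller releases the page references it took.
 *
 *	static void unlock_mapping_example(struct address_space *mapping)
 *	{
 *		struct pagevec pvec;
 *		pgoff_t index = 0;
 *
 *		pagevec_init(&pvec, 0);
 *		while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *			index = pvec.pages[pagevec_count(&pvec) - 1]->index + 1;
 *			check_move_unevictable_pages(pvec.pages,
 *						     pagevec_count(&pvec));
 *			pagevec_release(&pvec);
 *			cond_resched();
 *		}
 *	}
 */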
static void warn_scan_unevictable_pages(void)
{
	printk_once(KERN_WARNING
		    "%s: The scan_unevictable_pages sysctl/node-interface has been "
		    "disabled for lack of a legitimate use case. If you have "
		    "one, please send an email to linux-mm@kvack.org.\n",
		    current->comm);
}

/*
 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
 * all nodes' unevictable lists for evictable pages
 */
unsigned long scan_unevictable_pages;

int scan_unevictable_handler(struct ctl_table *table, int write,
			     void __user *buffer,
			     size_t *length, loff_t *ppos)
{
	warn_scan_unevictable_pages();
	proc_doulongvec_minmax(table, write, buffer, length, ppos);
	scan_unevictable_pages = 0;
	return 0;
}

#ifdef CONFIG_NUMA
/*
 * per node 'scan_unevictable_pages' attribute. On demand re-scan of
 * a specified node's per zone unevictable lists for evictable pages.
 */

static ssize_t read_scan_unevictable_node(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	warn_scan_unevictable_pages();
	return sprintf(buf, "0\n");	/* always zero; should fit... */
}

static ssize_t write_scan_unevictable_node(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	warn_scan_unevictable_pages();
	return 1;
}

static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
		   read_scan_unevictable_node,
		   write_scan_unevictable_node);

int scan_unevictable_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
}

void scan_unevictable_unregister_node(struct node *node)
{
	device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
}
#endif