/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	unsigned long hibernation_mode;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	int order;

	/* Scan (total_size >> priority) pages at once */
	int priority;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;
};
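/*
 * Editor's note: a minimal sketch of how a direct-reclaim entry point
 * (such as try_to_free_pages()) typically fills in scan_control. The
 * field values here are illustrative assumptions, not copied from this
 * file:
 *
 *	struct scan_control sc = {
 *		.gfp_mask	= gfp_mask,
 *		.nr_to_reclaim	= SWAP_CLUSTER_MAX,
 *		.may_writepage	= !laptop_mode,
 *		.may_unmap	= 1,
 *		.may_swap	= 1,
 *		.order		= order,
 *		.priority	= DEF_PRIORITY,
 *		.target_mem_cgroup = NULL,
 *		.nodemask	= nodemask,
 *	};
 */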
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}
#else
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}
#endif

static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	if (!mem_cgroup_disabled())
		return mem_cgroup_get_lru_size(lruvec, lru);

	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
}
/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	atomic_long_set(&shrinker->nr_in_batch, 0);
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);

static inline int do_shrinker_shrink(struct shrinker *shrinker,
				     struct shrink_control *sc,
				     unsigned long nr_to_scan)
{
	sc->nr_to_scan = nr_to_scan;
	return (*shrinker->shrink)(shrinker, sc);
}
#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (nr_pages_scanned == 0)
		nr_pages_scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem)) {
		/* Assume we'll be able to shrink next time */
		ret = 1;
		goto out;
	}

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		long total_scan;
		long max_pass;
		int shrink_ret = 0;
		long nr;
		long new_nr;
		long batch_size = shrinker->batch ? shrinker->batch
						  : SHRINK_BATCH;

		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
		if (max_pass <= 0)
			continue;

		/*
		 * copy the current shrinker scan count into a local variable
		 * and zero it so that other concurrent shrinker invocations
		 * don't also do this scanning work.
		 */
		nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);

		total_scan = nr;
		delta = (4 * nr_pages_scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		total_scan += delta;
		if (total_scan < 0) {
			printk(KERN_ERR "shrink_slab: %pF negative objects to "
			       "delete nr=%ld\n",
			       shrinker->shrink, total_scan);
			total_scan = max_pass;
		}

		/*
		 * We need to avoid excessive windup on filesystem shrinkers
		 * due to large numbers of GFP_NOFS allocations causing the
		 * shrinkers to return -1 all the time. This results in a large
		 * nr being built up so when a shrink that can do some work
		 * comes along it empties the entire cache due to nr >>>
		 * max_pass.  This is bad for sustaining a working set in
		 * memory.
		 *
		 * Hence only allow the shrinker to scan the entire cache when
		 * a large delta change is calculated directly.
		 */
		if (delta < max_pass / 4)
			total_scan = min(total_scan, max_pass / 2);

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (total_scan > max_pass * 2)
			total_scan = max_pass * 2;

		trace_mm_shrink_slab_start(shrinker, shrink, nr,
					nr_pages_scanned, lru_pages,
					max_pass, delta, total_scan);

		while (total_scan >= batch_size) {
			int nr_before;

			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
			shrink_ret = do_shrinker_shrink(shrinker, shrink,
							batch_size);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, batch_size);
			total_scan -= batch_size;

			cond_resched();
		}

		/*
		 * move the unused scan count back into the shrinker in a
		 * manner that handles concurrent updates. If we exhausted the
		 * scan, there is no need to do an update.
		 */
		if (total_scan > 0)
			new_nr = atomic_long_add_return(total_scan,
					&shrinker->nr_in_batch);
		else
			new_nr = atomic_long_read(&shrinker->nr_in_batch);

		trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
	}
	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return ret;
}
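/*
 * Editor's note: a worked example of the aging arithmetic above, using
 * illustrative numbers. Suppose shrink_slab() is called after scanning
 * nr_pages_scanned = 1000 LRU pages out of lru_pages = 100000 eligible
 * pages, and a shrinker with seeks = 2 reports max_pass = 50000 objects:
 *
 *	delta  = (4 * 1000) / 2 = 2000
 *	delta *= 50000
 *	delta /= (100000 + 1)  ~= 1000
 *
 * so about 2% of the cache (the 1% LRU scan fraction scaled by
 * 4 / seeks = 2) is queued for scanning, and is then worked through in
 * batch_size chunks by the loop above.
 */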
static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	return page_count(page) - page_has_private(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi,
			      struct scan_control *sc)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
			       struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;
/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_aio_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}
/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swapcache_free(swap, page);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;

		__delete_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_uncharge_cache_page(page);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}
/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}
/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	int lru;
	int active = !!TestClearPageActive(page);
	int was_unevictable = PageUnevictable(page);

	VM_BUG_ON(PageLRU(page));

redo:
	ClearPageUnevictable(page);

	if (page_evictable(page, NULL)) {
		/*
		 * For evictable pages, we can use the cache.
		 * In event of a race, worst case is we end up with an
		 * unevictable page on [in]active list.
		 * We know how to handle that.
		 */
		lru = active + page_lru_base_type(page);
		lru_cache_add_lru(page, lru);
	} else {
		/*
		 * Put unevictable pages directly on zone's unevictable
		 * list.
		 */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
		/*
		 * When racing with an mlock or AS_UNEVICTABLE clearing
		 * (page is unlocked) make sure that if the other thread
		 * does not observe our setting of PG_lru and fails
		 * isolation/check_move_unevictable_pages,
		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
		 * the page back to the evictable list.
		 *
		 * The other side is TestClearPageMlocked() or shmem_lock().
		 */
		smp_mb();
	}

	/*
	 * page's status can change while we move it among lru. If an evictable
	 * page is on unevictable list, it will never be freed. To avoid that,
	 * check it again after we have added it to the list.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
		if (!isolate_lru_page(page)) {
			put_page(page);
			goto redo;
		}
		/* This means someone else dropped this page from LRU
		 * So, it will be freed or putback to LRU again. There is
		 * nothing to do here.
		 */
	}

	if (was_unevictable && lru != LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGRESCUED);
	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGCULLED);

	put_page(page);		/* drop ref from isolate */
}
enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageSwapBacked(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if (vm_flags & VM_EXEC)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}
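/*
 * Editor's note: the decision logic above, summarised for reference
 * (rows follow the order in which the checks are made):
 *
 *	VM_LOCKED                                 -> PAGEREF_RECLAIM
 *	referenced_ptes && PageSwapBacked         -> PAGEREF_ACTIVATE
 *	referenced_ptes && (referenced_page ||
 *	                     referenced_ptes > 1) -> PAGEREF_ACTIVATE
 *	referenced_ptes && VM_EXEC                -> PAGEREF_ACTIVATE
 *	referenced_ptes (otherwise)               -> PAGEREF_KEEP
 *	referenced_page && !PageSwapBacked        -> PAGEREF_RECLAIM_CLEAN
 *	otherwise                                 -> PAGEREF_RECLAIM
 */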
/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
				      struct zone *zone,
				      struct scan_control *sc,
				      unsigned long *ret_nr_dirty,
				      unsigned long *ret_nr_writeback)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned long nr_dirty = 0;
	unsigned long nr_congested = 0;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_writeback = 0;

	cond_resched();

	while (!list_empty(page_list)) {
		enum page_references references;
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(page_zone(page) != zone);

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page, NULL)))
			goto cull_mlocked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			nr_writeback++;
			unlock_page(page);
			goto keep;
		}

		references = page_check_references(page, sc);
		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page))
				goto activate_locked;
			may_enter_fs = 1;
		}

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, TTU_UNMAP)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_MLOCK:
				goto cull_mlocked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			nr_dirty++;

			/*
			 * Only kswapd can writeback filesystem pages to
			 * avoid risk of stack overflow but do not writeback
			 * unless under significant pressure.
			 */
			if (page_is_file_cache(page) &&
					(!current_is_kswapd() ||
					 sc->priority >= DEF_PRIORITY - 2)) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty
				 */
				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto keep_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				nr_congested++;
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page))
					goto keep;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.
				 * Go ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
		__clear_page_locked(page);
free_it:
		nr_reclaimed++;

		/*
		 * Is there need to periodically free_page_list? It would
		 * appear not as the counts should be low
		 */
		list_add(&page->lru, &free_pages);
		continue;

cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		putback_lru_page(page);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}

	/*
	 * Tag a zone as congested if all the dirty pages encountered were
	 * backed by a congested BDI. In this case, reclaimers should just
	 * back off and wait for congestion to clear because further reclaim
	 * will encounter the same problem
	 */
	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
		zone_set_flag(zone, ZONE_CONGESTED);

	free_hot_cold_page_list(&free_pages, 1);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);
	*ret_nr_dirty += nr_dirty;
	*ret_nr_writeback += nr_writeback;
	return nr_reclaimed;
}
/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/* Do not give back unevictable pages for compaction */
	if (PageUnevictable(page))
		return ret;

	ret = -EBUSY;

	/*
	 * To minimise LRU disruption, the caller can indicate that it only
	 * wants to isolate pages it will be able to operate on without
	 * blocking - clean pages for the most part.
	 *
	 * ISOLATE_CLEAN means that only clean pages should be isolated. This
	 * is used by reclaim when it cannot write to backing storage.
	 *
	 * ISOLATE_ASYNC_MIGRATE is used to indicate that the caller only wants
	 * pages that it is possible to migrate without blocking.
	 */
	if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
		/* All the caller can do on PageWriteback is block */
		if (PageWriteback(page))
			return ret;

		if (PageDirty(page)) {
			struct address_space *mapping;

			/* ISOLATE_CLEAN means only clean pages */
			if (mode & ISOLATE_CLEAN)
				return ret;

			/*
			 * Only pages without mappings or that have a
			 * ->migratepage callback are possible to migrate
			 * without blocking
			 */
			mapping = page_mapping(page);
			if (mapping && !mapping->a_ops->migratepage)
				return ret;
		}
	}

	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
		return ret;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}
/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @lruvec:	The LRU vector to pull pages from.
 * @dst:	The temp list to put pages on to.
 * @nr_scanned:	The number of pages that were scanned.
 * @sc:		The scan_control struct for this reclaim session
 * @mode:	One of the LRU isolation modes
 * @lru:	LRU list id for isolating
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct lruvec *lruvec, struct list_head *dst,
		unsigned long *nr_scanned, struct scan_control *sc,
		isolate_mode_t mode, enum lru_list lru)
{
	struct list_head *src = &lruvec->lists[lru];
	unsigned long nr_taken = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		int nr_pages;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode)) {
		case 0:
			nr_pages = hpage_nr_pages(page);
			mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
			list_move(&page->lru, dst);
			nr_taken += nr_pages;
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}
	}

	*nr_scanned = scan;
	trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
				    nr_taken, mode, is_file_lru(lru));
	return nr_taken;
}
/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set. That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	VM_BUG_ON(!page_count(page));

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;

		spin_lock_irq(&zone->lru_lock);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		if (PageLRU(page)) {
			int lru = page_lru(page);
			get_page(page);
			ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, lru);
			ret = 0;
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}
/*
 * Are there way too many processes in the direct reclaim path already?
 */
static int too_many_isolated(struct zone *zone, int file,
			     struct scan_control *sc)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!global_reclaim(sc))
		return 0;

	if (file) {
		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
	} else {
		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
	}

	return isolated > inactive;
}
static noinline_for_stack void
putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	struct zone *zone = lruvec_zone(lruvec);
	LIST_HEAD(pages_to_free);

	/*
	 * Put back any unfreeable pages.
	 */
	while (!list_empty(page_list)) {
		struct page *page = lru_to_page(page_list);
		int lru;

		VM_BUG_ON(PageLRU(page));
		list_del(&page->lru);
		if (unlikely(!page_evictable(page, NULL))) {
			spin_unlock_irq(&zone->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&zone->lru_lock);
			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		SetPageLRU(page);
		lru = page_lru(page);
		add_page_to_lru_list(page, lruvec, lru);

		if (is_active_lru(lru)) {
			int file = is_file_lru(lru);
			int numpages = hpage_nr_pages(page);
			reclaim_stat->recent_rotated[file] += numpages;
		}
		if (put_page_testzero(page)) {
			__ClearPageLRU(page);
			__ClearPageActive(page);
			del_page_from_lru_list(page, lruvec, lru);

			if (unlikely(PageCompound(page))) {
				spin_unlock_irq(&zone->lru_lock);
				(*get_compound_page_dtor(page))(page);
				spin_lock_irq(&zone->lru_lock);
			} else
				list_add(&page->lru, &pages_to_free);
		}
	}

	/*
	 * To save our caller's stack, now use input list for pages to free.
	 */
	list_splice(&pages_to_free, page_list);
}
/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
		     struct scan_control *sc, enum lru_list lru)
{
	LIST_HEAD(page_list);
	unsigned long nr_scanned;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_taken;
	unsigned long nr_dirty = 0;
	unsigned long nr_writeback = 0;
	isolate_mode_t isolate_mode = 0;
	int file = is_file_lru(lru);
	struct zone *zone = lruvec_zone(lruvec);
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	while (unlikely(too_many_isolated(zone, file, sc))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/* We are about to die and free our memory. Return now. */
		if (fatal_signal_pending(current))
			return SWAP_CLUSTER_MAX;
	}

	lru_add_drain();

	if (!sc->may_unmap)
		isolate_mode |= ISOLATE_UNMAPPED;
	if (!sc->may_writepage)
		isolate_mode |= ISOLATE_CLEAN;

	spin_lock_irq(&zone->lru_lock);

	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
				     &nr_scanned, sc, isolate_mode, lru);

	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);

	if (global_reclaim(sc)) {
		zone->pages_scanned += nr_scanned;
		if (current_is_kswapd())
			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
		else
			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
	}
	spin_unlock_irq(&zone->lru_lock);

	if (nr_taken == 0)
		return 0;

	nr_reclaimed = shrink_page_list(&page_list, zone, sc,
					&nr_dirty, &nr_writeback);

	spin_lock_irq(&zone->lru_lock);

	reclaim_stat->recent_scanned[file] += nr_taken;

	if (global_reclaim(sc)) {
		if (current_is_kswapd())
			__count_zone_vm_events(PGSTEAL_KSWAPD, zone,
					       nr_reclaimed);
		else
			__count_zone_vm_events(PGSTEAL_DIRECT, zone,
					       nr_reclaimed);
	}

	putback_inactive_pages(lruvec, &page_list);

	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);

	spin_unlock_irq(&zone->lru_lock);

	free_hot_cold_page_list(&page_list, 1);

	/*
	 * If reclaim is isolating dirty pages under writeback, it implies
	 * that the long-lived page allocation rate is exceeding the page
	 * laundering rate. Either the global limits are not being effective
	 * at throttling processes due to the page distribution throughout
	 * zones or there is heavy usage of a slow backing device. The
	 * only option is to throttle from reclaim context which is not ideal
	 * as there is no guarantee the dirtying process is throttled in the
	 * same way balance_dirty_pages() manages.
	 *
	 * This scales the number of dirty pages that must be under writeback
	 * before throttling depending on priority. It is a simple backoff
	 * function that has the most effect in the range DEF_PRIORITY to
	 * DEF_PRIORITY-2, which is the priority range in which reclaim is
	 * considered to be in trouble.
	 *
	 * DEF_PRIORITY   100% isolated pages must be PageWriteback to throttle
	 * DEF_PRIORITY-1  50% must be PageWriteback
	 * DEF_PRIORITY-2  25% must be PageWriteback, kswapd in trouble
	 * ...
	 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
	 *                isolated page is PageWriteback
	 */
	if (nr_writeback && nr_writeback >=
			(nr_taken >> (DEF_PRIORITY - sc->priority)))
		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);

	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
		zone_idx(zone),
		nr_scanned, nr_reclaimed,
		sc->priority,
		trace_shrink_flags(file));
	return nr_reclaimed;
}
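/*
 * Editor's note: a worked example of the throttling threshold above,
 * with illustrative numbers. Assuming DEF_PRIORITY == 12 and
 * nr_taken == 32 (SWAP_CLUSTER_MAX) isolated pages:
 *
 *	priority == 12 (DEF_PRIORITY): threshold = 32 >> 0 = 32
 *	                               (all 32 must be PageWriteback)
 *	priority == 11:                threshold = 32 >> 1 = 16
 *	priority == 10:                threshold = 32 >> 2 =  8
 *	priority ==  6:                threshold = 32 >> 6 =  0,
 *	                               so any PageWriteback page throttles
 *
 * i.e. the deeper reclaim has to dig, the fewer pages under writeback it
 * tolerates before calling wait_iff_congested().
 */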
/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void move_active_pages_to_lru(struct lruvec *lruvec,
				     struct list_head *list,
				     struct list_head *pages_to_free,
				     enum lru_list lru)
{
	struct zone *zone = lruvec_zone(lruvec);
	unsigned long pgmoved = 0;
	struct page *page;
	int nr_pages;

	while (!list_empty(list)) {
		page = lru_to_page(list);
		lruvec = mem_cgroup_page_lruvec(page, zone);

		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);

		nr_pages = hpage_nr_pages(page);
		mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
		list_move(&page->lru, &lruvec->lists[lru]);
		pgmoved += nr_pages;

		if (put_page_testzero(page)) {
			__ClearPageLRU(page);
			__ClearPageActive(page);
			del_page_from_lru_list(page, lruvec, lru);

			if (unlikely(PageCompound(page))) {
				spin_unlock_irq(&zone->lru_lock);
				(*get_compound_page_dtor(page))(page);
				spin_lock_irq(&zone->lru_lock);
			} else
				list_add(&page->lru, pages_to_free);
		}
	}
	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
	if (!is_active_lru(lru))
		__count_vm_events(PGDEACTIVATE, pgmoved);
}
  1190. static void shrink_active_list(unsigned long nr_to_scan,
  1191. struct lruvec *lruvec,
  1192. struct scan_control *sc,
  1193. enum lru_list lru)
  1194. {
  1195. unsigned long nr_taken;
  1196. unsigned long nr_scanned;
  1197. unsigned long vm_flags;
  1198. LIST_HEAD(l_hold); /* The pages which were snipped off */
  1199. LIST_HEAD(l_active);
  1200. LIST_HEAD(l_inactive);
  1201. struct page *page;
  1202. struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
  1203. unsigned long nr_rotated = 0;
  1204. isolate_mode_t isolate_mode = 0;
  1205. int file = is_file_lru(lru);
  1206. struct zone *zone = lruvec_zone(lruvec);
  1207. lru_add_drain();
  1208. if (!sc->may_unmap)
  1209. isolate_mode |= ISOLATE_UNMAPPED;
  1210. if (!sc->may_writepage)
  1211. isolate_mode |= ISOLATE_CLEAN;
  1212. spin_lock_irq(&zone->lru_lock);
  1213. nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
  1214. &nr_scanned, sc, isolate_mode, lru);
  1215. if (global_reclaim(sc))
  1216. zone->pages_scanned += nr_scanned;
  1217. reclaim_stat->recent_scanned[file] += nr_taken;
  1218. __count_zone_vm_events(PGREFILL, zone, nr_scanned);
  1219. __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
  1220. __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
  1221. spin_unlock_irq(&zone->lru_lock);
  1222. while (!list_empty(&l_hold)) {
  1223. cond_resched();
  1224. page = lru_to_page(&l_hold);
  1225. list_del(&page->lru);
  1226. if (unlikely(!page_evictable(page, NULL))) {
  1227. putback_lru_page(page);
  1228. continue;
  1229. }
  1230. if (unlikely(buffer_heads_over_limit)) {
  1231. if (page_has_private(page) && trylock_page(page)) {
  1232. if (page_has_private(page))
  1233. try_to_release_page(page, 0);
  1234. unlock_page(page);
  1235. }
  1236. }
  1237. if (page_referenced(page, 0, sc->target_mem_cgroup,
  1238. &vm_flags)) {
  1239. nr_rotated += hpage_nr_pages(page);
/*
 * Identify referenced, file-backed active pages and
 * give them one more trip around the active list, so
 * that executable code gets a better chance to stay in
 * memory under moderate memory pressure.  Anon pages
 * are not likely to be evicted by use-once streaming
 * IO, plus the JVM can create lots of anon VM_EXEC pages,
 * so we ignore them here.
 */
  1249. if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
  1250. list_add(&page->lru, &l_active);
  1251. continue;
  1252. }
  1253. }
  1254. ClearPageActive(page); /* we are de-activating */
  1255. list_add(&page->lru, &l_inactive);
  1256. }
  1257. /*
  1258. * Move pages back to the lru list.
  1259. */
  1260. spin_lock_irq(&zone->lru_lock);
/*
 * Count referenced pages from currently used mappings as rotated,
 * even though only some of them are actually re-activated.  This
 * helps balance scan pressure between file and anonymous pages in
 * get_scan_count().
 */
  1267. reclaim_stat->recent_rotated[file] += nr_rotated;
  1268. move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
  1269. move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
  1270. __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
  1271. spin_unlock_irq(&zone->lru_lock);
  1272. free_hot_cold_page_list(&l_hold, 1);
  1273. }
  1274. #ifdef CONFIG_SWAP
  1275. static int inactive_anon_is_low_global(struct zone *zone)
  1276. {
  1277. unsigned long active, inactive;
  1278. active = zone_page_state(zone, NR_ACTIVE_ANON);
  1279. inactive = zone_page_state(zone, NR_INACTIVE_ANON);
  1280. if (inactive * zone->inactive_ratio < active)
  1281. return 1;
  1282. return 0;
  1283. }
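/*
 * Worked example (illustrative numbers): zone->inactive_ratio grows
 * roughly with the square root of the zone size in GB, so a ~1GB zone
 * typically ends up with an inactive_ratio of about 3.  With 600000
 * active and 150000 inactive anon pages, 150000 * 3 = 450000 < 600000,
 * so the check above returns 1 and some active anon pages are
 * deactivated.
 */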
  1284. /**
  1285. * inactive_anon_is_low - check if anonymous pages need to be deactivated
  1286. * @lruvec: LRU vector to check
  1287. *
  1288. * Returns true if the zone does not have enough inactive anon pages,
  1289. * meaning some active anon pages need to be deactivated.
  1290. */
  1291. static int inactive_anon_is_low(struct lruvec *lruvec)
  1292. {
  1293. /*
  1294. * If we don't have swap space, anonymous page deactivation
  1295. * is pointless.
  1296. */
  1297. if (!total_swap_pages)
  1298. return 0;
  1299. if (!mem_cgroup_disabled())
  1300. return mem_cgroup_inactive_anon_is_low(lruvec);
  1301. return inactive_anon_is_low_global(lruvec_zone(lruvec));
  1302. }
  1303. #else
  1304. static inline int inactive_anon_is_low(struct lruvec *lruvec)
  1305. {
  1306. return 0;
  1307. }
  1308. #endif
  1309. static int inactive_file_is_low_global(struct zone *zone)
  1310. {
  1311. unsigned long active, inactive;
  1312. active = zone_page_state(zone, NR_ACTIVE_FILE);
  1313. inactive = zone_page_state(zone, NR_INACTIVE_FILE);
  1314. return (active > inactive);
  1315. }
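/*
 * Example with illustrative numbers: 600000 active file pages against
 * 400000 inactive file pages means active > inactive, so this returns
 * true and shrink_list() will deactivate active file pages until at
 * least half of the file pages sit on the inactive list.
 */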
  1316. /**
  1317. * inactive_file_is_low - check if file pages need to be deactivated
  1318. * @lruvec: LRU vector to check
  1319. *
  1320. * When the system is doing streaming IO, memory pressure here
  1321. * ensures that active file pages get deactivated, until more
  1322. * than half of the file pages are on the inactive list.
  1323. *
  1324. * Once we get to that situation, protect the system's working
  1325. * set from being evicted by disabling active file page aging.
  1326. *
  1327. * This uses a different ratio than the anonymous pages, because
  1328. * the page cache uses a use-once replacement algorithm.
  1329. */
  1330. static int inactive_file_is_low(struct lruvec *lruvec)
  1331. {
  1332. if (!mem_cgroup_disabled())
  1333. return mem_cgroup_inactive_file_is_low(lruvec);
  1334. return inactive_file_is_low_global(lruvec_zone(lruvec));
  1335. }
  1336. static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
  1337. {
  1338. if (is_file_lru(lru))
  1339. return inactive_file_is_low(lruvec);
  1340. else
  1341. return inactive_anon_is_low(lruvec);
  1342. }
  1343. static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
  1344. struct lruvec *lruvec, struct scan_control *sc)
  1345. {
  1346. if (is_active_lru(lru)) {
  1347. if (inactive_list_is_low(lruvec, lru))
  1348. shrink_active_list(nr_to_scan, lruvec, sc, lru);
  1349. return 0;
  1350. }
  1351. return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
  1352. }
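/*
 * For global reclaim this is the vm.swappiness sysctl (0-100, default
 * 60); for memcg reclaim it is the cgroup's own memory.swappiness
 * setting.
 */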
  1353. static int vmscan_swappiness(struct scan_control *sc)
  1354. {
  1355. if (global_reclaim(sc))
  1356. return vm_swappiness;
  1357. return mem_cgroup_swappiness(sc->target_mem_cgroup);
  1358. }
  1359. /*
  1360. * Determine how aggressively the anon and file LRU lists should be
  1361. * scanned. The relative value of each set of LRU lists is determined
  1362. * by looking at the fraction of the pages scanned we did rotate back
  1363. * onto the active list instead of evict.
  1364. *
  1365. * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
  1366. * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
  1367. */
  1368. static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
  1369. unsigned long *nr)
  1370. {
  1371. unsigned long anon, file, free;
  1372. unsigned long anon_prio, file_prio;
  1373. unsigned long ap, fp;
  1374. struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
  1375. u64 fraction[2], denominator;
  1376. enum lru_list lru;
  1377. int noswap = 0;
  1378. bool force_scan = false;
  1379. struct zone *zone = lruvec_zone(lruvec);
  1380. /*
  1381. * If the zone or memcg is small, nr[l] can be 0. This
  1382. * results in no scanning on this priority and a potential
  1383. * priority drop. Global direct reclaim can go to the next
  1384. * zone and tends to have no problems. Global kswapd is for
  1385. * zone balancing and it needs to scan a minimum amount. When
  1386. * reclaiming for a memcg, a priority drop can cause high
  1387. * latencies, so it's better to scan a minimum amount there as
  1388. * well.
  1389. */
  1390. if (current_is_kswapd() && zone->all_unreclaimable)
  1391. force_scan = true;
  1392. if (!global_reclaim(sc))
  1393. force_scan = true;
  1394. /* If we have no swap space, do not bother scanning anon pages. */
  1395. if (!sc->may_swap || (nr_swap_pages <= 0)) {
  1396. noswap = 1;
  1397. fraction[0] = 0;
  1398. fraction[1] = 1;
  1399. denominator = 1;
  1400. goto out;
  1401. }
  1402. anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
  1403. get_lru_size(lruvec, LRU_INACTIVE_ANON);
  1404. file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
  1405. get_lru_size(lruvec, LRU_INACTIVE_FILE);
  1406. if (global_reclaim(sc)) {
  1407. free = zone_page_state(zone, NR_FREE_PAGES);
  1408. /* If we have very few page cache pages,
  1409. force-scan anon pages. */
  1410. if (unlikely(file + free <= high_wmark_pages(zone))) {
  1411. fraction[0] = 1;
  1412. fraction[1] = 0;
  1413. denominator = 1;
  1414. goto out;
  1415. }
  1416. }
  1417. /*
  1418. * With swappiness at 100, anonymous and file have the same priority.
  1419. * This scanning priority is essentially the inverse of IO cost.
  1420. */
  1421. anon_prio = vmscan_swappiness(sc);
  1422. file_prio = 200 - anon_prio;
  1423. /*
  1424. * OK, so we have swap space and a fair amount of page cache
  1425. * pages. We use the recently rotated / recently scanned
  1426. * ratios to determine how valuable each cache is.
  1427. *
  1428. * Because workloads change over time (and to avoid overflow)
  1429. * we keep these statistics as a floating average, which ends
  1430. * up weighing recent references more than old ones.
  1431. *
  1432. * anon in [0], file in [1]
  1433. */
  1434. spin_lock_irq(&zone->lru_lock);
  1435. if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
  1436. reclaim_stat->recent_scanned[0] /= 2;
  1437. reclaim_stat->recent_rotated[0] /= 2;
  1438. }
  1439. if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
  1440. reclaim_stat->recent_scanned[1] /= 2;
  1441. reclaim_stat->recent_rotated[1] /= 2;
  1442. }
  1443. /*
  1444. * The amount of pressure on anon vs file pages is inversely
  1445. * proportional to the fraction of recently scanned pages on
  1446. * each list that were recently referenced and in active use.
  1447. */
  1448. ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
  1449. ap /= reclaim_stat->recent_rotated[0] + 1;
  1450. fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
  1451. fp /= reclaim_stat->recent_rotated[1] + 1;
  1452. spin_unlock_irq(&zone->lru_lock);
  1453. fraction[0] = ap;
  1454. fraction[1] = fp;
  1455. denominator = ap + fp + 1;
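/*
 * Worked example (illustrative numbers): with the default swappiness of
 * 60, anon_prio = 60 and file_prio = 140.  Suppose recent_scanned/rotated
 * are 1000/800 for anon and 1000/100 for file.  Then
 *   ap = 60  * 1001 / 801 = 74
 *   fp = 140 * 1001 / 101 = 1387
 * so file pages receive roughly 1387 / (74 + 1387 + 1), i.e. about 95%,
 * of the scan pressure in the loop below.
 */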
  1456. out:
  1457. for_each_evictable_lru(lru) {
  1458. int file = is_file_lru(lru);
  1459. unsigned long scan;
  1460. scan = get_lru_size(lruvec, lru);
  1461. if (sc->priority || noswap || !vmscan_swappiness(sc)) {
  1462. scan >>= sc->priority;
  1463. if (!scan && force_scan)
  1464. scan = SWAP_CLUSTER_MAX;
  1465. scan = div64_u64(scan * fraction[file], denominator);
  1466. }
  1467. nr[lru] = scan;
  1468. }
  1469. }
  1470. /* Use reclaim/compaction for costly allocs or under memory pressure */
  1471. static bool in_reclaim_compaction(struct scan_control *sc)
  1472. {
  1473. if (COMPACTION_BUILD && sc->order &&
  1474. (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
  1475. sc->priority < DEF_PRIORITY - 2))
  1476. return true;
  1477. return false;
  1478. }
  1479. /*
  1480. * Reclaim/compaction is used for high-order allocation requests. It reclaims
  1481. * order-0 pages before compacting the zone. should_continue_reclaim() returns
  1482. * true if more pages should be reclaimed such that when the page allocator
  1483. * calls try_to_compact_zone() that it will have enough free pages to succeed.
  1484. * It will give up earlier than that if there is difficulty reclaiming pages.
  1485. */
  1486. static inline bool should_continue_reclaim(struct lruvec *lruvec,
  1487. unsigned long nr_reclaimed,
  1488. unsigned long nr_scanned,
  1489. struct scan_control *sc)
  1490. {
  1491. unsigned long pages_for_compaction;
  1492. unsigned long inactive_lru_pages;
  1493. /* If not in reclaim/compaction mode, stop */
  1494. if (!in_reclaim_compaction(sc))
  1495. return false;
  1496. /* Consider stopping depending on scan and reclaim activity */
  1497. if (sc->gfp_mask & __GFP_REPEAT) {
  1498. /*
  1499. * For __GFP_REPEAT allocations, stop reclaiming if the
  1500. * full LRU list has been scanned and we are still failing
  1501. * to reclaim pages. This full LRU scan is potentially
  1502. * expensive but a __GFP_REPEAT caller really wants to succeed
  1503. */
  1504. if (!nr_reclaimed && !nr_scanned)
  1505. return false;
  1506. } else {
/*
 * For non-__GFP_REPEAT allocations which can presumably
 * fail without consequence, stop if we failed to reclaim
 * any pages from the last SWAP_CLUSTER_MAX number of
 * pages that were scanned.  This returns to the caller
 * faster, at the risk that reclaim/compaction and the
 * resulting allocation attempt fail.
 */
  1515. if (!nr_reclaimed)
  1516. return false;
  1517. }
  1518. /*
  1519. * If we have not reclaimed enough pages for compaction and the
  1520. * inactive lists are large enough, continue reclaiming
  1521. */
  1522. pages_for_compaction = (2UL << sc->order);
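/*
 * e.g. for an order-3 request this is 2UL << 3 = 16 pages, i.e. twice
 * the size of the 8-page allocation being compacted for.
 */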
  1523. inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
  1524. if (nr_swap_pages > 0)
  1525. inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
  1526. if (sc->nr_reclaimed < pages_for_compaction &&
  1527. inactive_lru_pages > pages_for_compaction)
  1528. return true;
  1529. /* If compaction would go ahead or the allocation would succeed, stop */
  1530. switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
  1531. case COMPACT_PARTIAL:
  1532. case COMPACT_CONTINUE:
  1533. return false;
  1534. default:
  1535. return true;
  1536. }
  1537. }
  1538. /*
  1539. * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
  1540. */
  1541. static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
  1542. {
  1543. unsigned long nr[NR_LRU_LISTS];
  1544. unsigned long nr_to_scan;
  1545. enum lru_list lru;
  1546. unsigned long nr_reclaimed, nr_scanned;
  1547. unsigned long nr_to_reclaim = sc->nr_to_reclaim;
  1548. struct blk_plug plug;
  1549. restart:
  1550. nr_reclaimed = 0;
  1551. nr_scanned = sc->nr_scanned;
  1552. get_scan_count(lruvec, sc, nr);
  1553. blk_start_plug(&plug);
  1554. while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
  1555. nr[LRU_INACTIVE_FILE]) {
  1556. for_each_evictable_lru(lru) {
  1557. if (nr[lru]) {
  1558. nr_to_scan = min_t(unsigned long,
  1559. nr[lru], SWAP_CLUSTER_MAX);
  1560. nr[lru] -= nr_to_scan;
  1561. nr_reclaimed += shrink_list(lru, nr_to_scan,
  1562. lruvec, sc);
  1563. }
  1564. }
  1565. /*
  1566. * On large memory systems, scan >> priority can become
  1567. * really large. This is fine for the starting priority;
  1568. * we want to put equal scanning pressure on each zone.
  1569. * However, if the VM has a harder time of freeing pages,
  1570. * with multiple processes reclaiming pages, the total
  1571. * freeing target can get unreasonably large.
  1572. */
  1573. if (nr_reclaimed >= nr_to_reclaim &&
  1574. sc->priority < DEF_PRIORITY)
  1575. break;
  1576. }
  1577. blk_finish_plug(&plug);
  1578. sc->nr_reclaimed += nr_reclaimed;
  1579. /*
  1580. * Even if we did not try to evict anon pages at all, we want to
  1581. * rebalance the anon lru active/inactive ratio.
  1582. */
  1583. if (inactive_anon_is_low(lruvec))
  1584. shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
  1585. sc, LRU_ACTIVE_ANON);
  1586. /* reclaim/compaction might need reclaim to continue */
  1587. if (should_continue_reclaim(lruvec, nr_reclaimed,
  1588. sc->nr_scanned - nr_scanned, sc))
  1589. goto restart;
  1590. throttle_vm_writeout(sc->gfp_mask);
  1591. }
  1592. static void shrink_zone(struct zone *zone, struct scan_control *sc)
  1593. {
  1594. struct mem_cgroup *root = sc->target_mem_cgroup;
  1595. struct mem_cgroup_reclaim_cookie reclaim = {
  1596. .zone = zone,
  1597. .priority = sc->priority,
  1598. };
  1599. struct mem_cgroup *memcg;
  1600. memcg = mem_cgroup_iter(root, NULL, &reclaim);
  1601. do {
  1602. struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
  1603. shrink_lruvec(lruvec, sc);
  1604. /*
  1605. * Limit reclaim has historically picked one memcg and
  1606. * scanned it with decreasing priority levels until
  1607. * nr_to_reclaim had been reclaimed. This priority
  1608. * cycle is thus over after a single memcg.
  1609. *
  1610. * Direct reclaim and kswapd, on the other hand, have
  1611. * to scan all memory cgroups to fulfill the overall
  1612. * scan target for the zone.
  1613. */
  1614. if (!global_reclaim(sc)) {
  1615. mem_cgroup_iter_break(root, memcg);
  1616. break;
  1617. }
  1618. memcg = mem_cgroup_iter(root, memcg, &reclaim);
  1619. } while (memcg);
  1620. }
  1621. /* Returns true if compaction should go ahead for a high-order request */
  1622. static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
  1623. {
  1624. unsigned long balance_gap, watermark;
  1625. bool watermark_ok;
  1626. /* Do not consider compaction for orders reclaim is meant to satisfy */
  1627. if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
  1628. return false;
  1629. /*
  1630. * Compaction takes time to run and there are potentially other
  1631. * callers using the pages just freed. Continue reclaiming until
  1632. * there is a buffer of free pages available to give compaction
  1633. * a reasonable chance of completing and allocating the page
  1634. */
  1635. balance_gap = min(low_wmark_pages(zone),
  1636. (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
  1637. KSWAPD_ZONE_BALANCE_GAP_RATIO);
  1638. watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
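/*
 * Illustrative numbers, assuming KSWAPD_ZONE_BALANCE_GAP_RATIO is 100
 * (i.e. a 1% gap): on a zone of 1,000,000 pages, balance_gap is at most
 * about 10,000 pages, capped by the low watermark, so for an order-9
 * request the target is roughly high_wmark + 10,000 + 1024 free pages
 * before compaction is considered ready.
 */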
  1639. watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
  1640. /*
  1641. * If compaction is deferred, reclaim up to a point where
  1642. * compaction will have a chance of success when re-enabled
  1643. */
  1644. if (compaction_deferred(zone, sc->order))
  1645. return watermark_ok;
  1646. /* If compaction is not ready to start, keep reclaiming */
  1647. if (!compaction_suitable(zone, sc->order))
  1648. return false;
  1649. return watermark_ok;
  1650. }
  1651. /*
  1652. * This is the direct reclaim path, for page-allocating processes. We only
  1653. * try to reclaim pages from zones which will satisfy the caller's allocation
  1654. * request.
  1655. *
  1656. * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
  1657. * Because:
  1658. * a) The caller may be trying to free *extra* pages to satisfy a higher-order
  1659. * allocation or
  1660. * b) The target zone may be at high_wmark_pages(zone) but the lower zones
  1661. * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
  1662. * zone defense algorithm.
  1663. *
  1664. * If a zone is deemed to be full of pinned pages then just give it a light
  1665. * scan then give up on it.
  1666. *
  1667. * This function returns true if a zone is being reclaimed for a costly
  1668. * high-order allocation and compaction is ready to begin. This indicates to
  1669. * the caller that it should consider retrying the allocation instead of
  1670. * further reclaim.
  1671. */
  1672. static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
  1673. {
  1674. struct zoneref *z;
  1675. struct zone *zone;
  1676. unsigned long nr_soft_reclaimed;
  1677. unsigned long nr_soft_scanned;
  1678. bool aborted_reclaim = false;
  1679. /*
  1680. * If the number of buffer_heads in the machine exceeds the maximum
  1681. * allowed level, force direct reclaim to scan the highmem zone as
  1682. * highmem pages could be pinning lowmem pages storing buffer_heads
  1683. */
  1684. if (buffer_heads_over_limit)
  1685. sc->gfp_mask |= __GFP_HIGHMEM;
  1686. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  1687. gfp_zone(sc->gfp_mask), sc->nodemask) {
  1688. if (!populated_zone(zone))
  1689. continue;
/*
 * Take care that memory-controller (memcg) reclaim has only a
 * small influence on the global LRU.
 */
  1694. if (global_reclaim(sc)) {
  1695. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  1696. continue;
  1697. if (zone->all_unreclaimable &&
  1698. sc->priority != DEF_PRIORITY)
  1699. continue; /* Let kswapd poll it */
  1700. if (COMPACTION_BUILD) {
  1701. /*
  1702. * If we already have plenty of memory free for
  1703. * compaction in this zone, don't free any more.
  1704. * Even though compaction is invoked for any
  1705. * non-zero order, only frequent costly order
  1706. * reclamation is disruptive enough to become a
  1707. * noticeable problem, like transparent huge
  1708. * page allocations.
  1709. */
  1710. if (compaction_ready(zone, sc)) {
  1711. aborted_reclaim = true;
  1712. continue;
  1713. }
  1714. }
  1715. /*
  1716. * This steals pages from memory cgroups over softlimit
  1717. * and returns the number of reclaimed pages and
  1718. * scanned pages. This works for global memory pressure
  1719. * and balancing, not for a memcg's limit.
  1720. */
  1721. nr_soft_scanned = 0;
  1722. nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
  1723. sc->order, sc->gfp_mask,
  1724. &nr_soft_scanned);
  1725. sc->nr_reclaimed += nr_soft_reclaimed;
  1726. sc->nr_scanned += nr_soft_scanned;
/* need some check to avoid calling shrink_zone() again unnecessarily */
  1728. }
  1729. shrink_zone(zone, sc);
  1730. }
  1731. return aborted_reclaim;
  1732. }
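/*
 * A zone counts as reclaimable as long as it has been scanned fewer than
 * six times the number of pages that could plausibly be reclaimed from it.
 */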
  1733. static bool zone_reclaimable(struct zone *zone)
  1734. {
  1735. return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
  1736. }
  1737. /* All zones in zonelist are unreclaimable? */
  1738. static bool all_unreclaimable(struct zonelist *zonelist,
  1739. struct scan_control *sc)
  1740. {
  1741. struct zoneref *z;
  1742. struct zone *zone;
  1743. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  1744. gfp_zone(sc->gfp_mask), sc->nodemask) {
  1745. if (!populated_zone(zone))
  1746. continue;
  1747. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  1748. continue;
  1749. if (!zone->all_unreclaimable)
  1750. return false;
  1751. }
  1752. return true;
  1753. }
  1754. /*
  1755. * This is the main entry point to direct page reclaim.
  1756. *
  1757. * If a full scan of the inactive list fails to free enough memory then we
  1758. * are "out of memory" and something needs to be killed.
  1759. *
  1760. * If the caller is !__GFP_FS then the probability of a failure is reasonably
  1761. * high - the zone may be full of dirty or under-writeback pages, which this
  1762. * caller can't do much about. We kick the writeback threads and take explicit
  1763. * naps in the hope that some of these pages can be written. But if the
  1764. * allocating task holds filesystem locks which prevent writeout this might not
  1765. * work, and the allocation attempt will fail.
  1766. *
  1767. * returns: 0, if no pages reclaimed
  1768. * else, the number of pages reclaimed
  1769. */
  1770. static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
  1771. struct scan_control *sc,
  1772. struct shrink_control *shrink)
  1773. {
  1774. unsigned long total_scanned = 0;
  1775. struct reclaim_state *reclaim_state = current->reclaim_state;
  1776. struct zoneref *z;
  1777. struct zone *zone;
  1778. unsigned long writeback_threshold;
  1779. bool aborted_reclaim;
  1780. delayacct_freepages_start();
  1781. if (global_reclaim(sc))
  1782. count_vm_event(ALLOCSTALL);
  1783. do {
  1784. sc->nr_scanned = 0;
  1785. aborted_reclaim = shrink_zones(zonelist, sc);
  1786. /*
  1787. * Don't shrink slabs when reclaiming memory from
  1788. * over limit cgroups
  1789. */
  1790. if (global_reclaim(sc)) {
  1791. unsigned long lru_pages = 0;
  1792. for_each_zone_zonelist(zone, z, zonelist,
  1793. gfp_zone(sc->gfp_mask)) {
  1794. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  1795. continue;
  1796. lru_pages += zone_reclaimable_pages(zone);
  1797. }
  1798. shrink_slab(shrink, sc->nr_scanned, lru_pages);
  1799. if (reclaim_state) {
  1800. sc->nr_reclaimed += reclaim_state->reclaimed_slab;
  1801. reclaim_state->reclaimed_slab = 0;
  1802. }
  1803. }
  1804. total_scanned += sc->nr_scanned;
  1805. if (sc->nr_reclaimed >= sc->nr_to_reclaim)
  1806. goto out;
  1807. /*
  1808. * Try to write back as many pages as we just scanned. This
  1809. * tends to cause slow streaming writers to write data to the
  1810. * disk smoothly, at the dirtying rate, which is nice. But
  1811. * that's undesirable in laptop mode, where we *want* lumpy
  1812. * writeout. So in laptop mode, write out the whole world.
  1813. */
  1814. writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
  1815. if (total_scanned > writeback_threshold) {
  1816. wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
  1817. WB_REASON_TRY_TO_FREE_PAGES);
  1818. sc->may_writepage = 1;
  1819. }
  1820. /* Take a nap, wait for some writeback to complete */
  1821. if (!sc->hibernation_mode && sc->nr_scanned &&
  1822. sc->priority < DEF_PRIORITY - 2) {
  1823. struct zone *preferred_zone;
  1824. first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
  1825. &cpuset_current_mems_allowed,
  1826. &preferred_zone);
  1827. wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
  1828. }
  1829. } while (--sc->priority >= 0);
  1830. out:
  1831. delayacct_freepages_end();
  1832. if (sc->nr_reclaimed)
  1833. return sc->nr_reclaimed;
/*
 * While hibernation is in progress, kswapd is frozen and cannot mark
 * the zone all_unreclaimable, so we bypass the all_unreclaimable
 * check here.
 */
  1839. if (oom_killer_disabled)
  1840. return 0;
  1841. /* Aborted reclaim to try compaction? don't OOM, then */
  1842. if (aborted_reclaim)
  1843. return 1;
  1844. /* top priority shrink_zones still had more to do? don't OOM, then */
  1845. if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
  1846. return 1;
  1847. return 0;
  1848. }
  1849. unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
  1850. gfp_t gfp_mask, nodemask_t *nodemask)
  1851. {
  1852. unsigned long nr_reclaimed;
  1853. struct scan_control sc = {
  1854. .gfp_mask = gfp_mask,
  1855. .may_writepage = !laptop_mode,
  1856. .nr_to_reclaim = SWAP_CLUSTER_MAX,
  1857. .may_unmap = 1,
  1858. .may_swap = 1,
  1859. .order = order,
  1860. .priority = DEF_PRIORITY,
  1861. .target_mem_cgroup = NULL,
  1862. .nodemask = nodemask,
  1863. };
  1864. struct shrink_control shrink = {
  1865. .gfp_mask = sc.gfp_mask,
  1866. };
  1867. trace_mm_vmscan_direct_reclaim_begin(order,
  1868. sc.may_writepage,
  1869. gfp_mask);
  1870. nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
  1871. trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
  1872. return nr_reclaimed;
  1873. }
  1874. #ifdef CONFIG_CGROUP_MEM_RES_CTLR
  1875. unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
  1876. gfp_t gfp_mask, bool noswap,
  1877. struct zone *zone,
  1878. unsigned long *nr_scanned)
  1879. {
  1880. struct scan_control sc = {
  1881. .nr_scanned = 0,
  1882. .nr_to_reclaim = SWAP_CLUSTER_MAX,
  1883. .may_writepage = !laptop_mode,
  1884. .may_unmap = 1,
  1885. .may_swap = !noswap,
  1886. .order = 0,
  1887. .priority = 0,
  1888. .target_mem_cgroup = memcg,
  1889. };
  1890. struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
  1891. sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
  1892. (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
  1893. trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
  1894. sc.may_writepage,
  1895. sc.gfp_mask);
/*
 * NOTE: Although we can get the priority field, using it
 * here is not a good idea, since it limits the pages we can scan.
 * If we don't reclaim here, the shrink_zone() called from
 * balance_pgdat() will pick up pages from other mem cgroups as
 * well.  We hack the priority and make it zero.
 */
  1903. shrink_lruvec(lruvec, &sc);
  1904. trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
  1905. *nr_scanned = sc.nr_scanned;
  1906. return sc.nr_reclaimed;
  1907. }
  1908. unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
  1909. gfp_t gfp_mask,
  1910. bool noswap)
  1911. {
  1912. struct zonelist *zonelist;
  1913. unsigned long nr_reclaimed;
  1914. int nid;
  1915. struct scan_control sc = {
  1916. .may_writepage = !laptop_mode,
  1917. .may_unmap = 1,
  1918. .may_swap = !noswap,
  1919. .nr_to_reclaim = SWAP_CLUSTER_MAX,
  1920. .order = 0,
  1921. .priority = DEF_PRIORITY,
  1922. .target_mem_cgroup = memcg,
.nodemask = NULL, /* we don't care about placement */
  1924. .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
  1925. (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
  1926. };
  1927. struct shrink_control shrink = {
  1928. .gfp_mask = sc.gfp_mask,
  1929. };
/*
 * Unlike direct reclaim via alloc_pages(), memcg reclaim doesn't
 * care where the pages come from, so the node where we start the
 * scan does not need to be the current node.
 */
  1935. nid = mem_cgroup_select_victim_node(memcg);
  1936. zonelist = NODE_DATA(nid)->node_zonelists;
  1937. trace_mm_vmscan_memcg_reclaim_begin(0,
  1938. sc.may_writepage,
  1939. sc.gfp_mask);
  1940. nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
  1941. trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
  1942. return nr_reclaimed;
  1943. }
  1944. #endif
  1945. static void age_active_anon(struct zone *zone, struct scan_control *sc)
  1946. {
  1947. struct mem_cgroup *memcg;
  1948. if (!total_swap_pages)
  1949. return;
  1950. memcg = mem_cgroup_iter(NULL, NULL, NULL);
  1951. do {
  1952. struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
  1953. if (inactive_anon_is_low(lruvec))
  1954. shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
  1955. sc, LRU_ACTIVE_ANON);
  1956. memcg = mem_cgroup_iter(NULL, memcg, NULL);
  1957. } while (memcg);
  1958. }
/*
 * pgdat_balanced is used when checking if a node is balanced for high-order
 * allocations.  Only zones that meet watermarks and are in a zone allowed
 * by the caller's classzone_idx are added to balanced_pages.  The total of
 * balanced pages must be at least 25% of the zones allowed by classzone_idx
 * for the node to be considered balanced.  Forcing all zones to be balanced
 * for high orders can cause excessive reclaim when there are imbalanced zones.
 * The choice of 25% is due to
 *   o a 16M DMA zone that is balanced will not balance a zone on any
 *     reasonably sized machine
 *   o On all other machines, the top zone must be at least a reasonable
 *     percentage of the middle zones.  For example, on 32-bit x86, highmem
 *     would need to be at least 256M for it to balance a whole node.
 *     Similarly, on x86-64 the Normal zone would need to be at least 1G
 *     to balance a node on its own.  These seemed like reasonable ratios.
 */
  1975. static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
  1976. int classzone_idx)
  1977. {
  1978. unsigned long present_pages = 0;
  1979. int i;
  1980. for (i = 0; i <= classzone_idx; i++)
  1981. present_pages += pgdat->node_zones[i].present_pages;
  1982. /* A special case here: if zone has no page, we think it's balanced */
  1983. return balanced_pages >= (present_pages >> 2);
  1984. }
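/*
 * Worked example (illustrative numbers): with classzone_idx covering
 * zones totalling 4,000,000 present pages, the node counts as balanced
 * for a high-order allocation once the zones meeting their watermarks
 * add up to at least 4,000,000 >> 2 = 1,000,000 pages.
 */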
  1985. /* is kswapd sleeping prematurely? */
  1986. static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
  1987. int classzone_idx)
  1988. {
  1989. int i;
  1990. unsigned long balanced = 0;
  1991. bool all_zones_ok = true;
  1992. /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
  1993. if (remaining)
  1994. return true;
  1995. /* Check the watermark levels */
  1996. for (i = 0; i <= classzone_idx; i++) {
  1997. struct zone *zone = pgdat->node_zones + i;
  1998. if (!populated_zone(zone))
  1999. continue;
/*
 * balance_pgdat() skips over all_unreclaimable zones after
 * DEF_PRIORITY.  Effectively, it considers them balanced, so
 * they must be considered balanced here as well if kswapd
 * is to sleep.
 */
  2006. if (zone->all_unreclaimable) {
  2007. balanced += zone->present_pages;
  2008. continue;
  2009. }
  2010. if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
  2011. i, 0))
  2012. all_zones_ok = false;
  2013. else
  2014. balanced += zone->present_pages;
  2015. }
/*
 * For high-order requests, the balanced zones must contain at least
 * 25% of the node's pages for kswapd to sleep.  For order-0, all
 * zones must be balanced.
 */
  2021. if (order)
  2022. return !pgdat_balanced(pgdat, balanced, classzone_idx);
  2023. else
  2024. return !all_zones_ok;
  2025. }
  2026. /*
  2027. * For kswapd, balance_pgdat() will work across all this node's zones until
  2028. * they are all at high_wmark_pages(zone).
  2029. *
  2030. * Returns the final order kswapd was reclaiming at
  2031. *
  2032. * There is special handling here for zones which are full of pinned pages.
  2033. * This can happen if the pages are all mlocked, or if they are all used by
  2034. * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
  2035. * What we do is to detect the case where all pages in the zone have been
  2036. * scanned twice and there has been zero successful reclaim. Mark the zone as
  2037. * dead and from now on, only perform a short scan. Basically we're polling
  2038. * the zone for when the problem goes away.
  2039. *
  2040. * kswapd scans the zones in the highmem->normal->dma direction. It skips
  2041. * zones which have free_pages > high_wmark_pages(zone), but once a zone is
  2042. * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
  2043. * lower zones regardless of the number of free pages in the lower zones. This
  2044. * interoperates with the page allocator fallback scheme to ensure that aging
  2045. * of pages is balanced across the zones.
  2046. */
  2047. static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
  2048. int *classzone_idx)
  2049. {
  2050. int all_zones_ok;
  2051. unsigned long balanced;
  2052. int i;
  2053. int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
  2054. unsigned long total_scanned;
  2055. struct reclaim_state *reclaim_state = current->reclaim_state;
  2056. unsigned long nr_soft_reclaimed;
  2057. unsigned long nr_soft_scanned;
  2058. struct scan_control sc = {
  2059. .gfp_mask = GFP_KERNEL,
  2060. .may_unmap = 1,
  2061. .may_swap = 1,
/*
 * kswapd doesn't want to be bailed out while reclaiming, because
 * we want to put equal scanning pressure on each zone.
 */
  2066. .nr_to_reclaim = ULONG_MAX,
  2067. .order = order,
  2068. .target_mem_cgroup = NULL,
  2069. };
  2070. struct shrink_control shrink = {
  2071. .gfp_mask = sc.gfp_mask,
  2072. };
  2073. loop_again:
  2074. total_scanned = 0;
  2075. sc.priority = DEF_PRIORITY;
  2076. sc.nr_reclaimed = 0;
  2077. sc.may_writepage = !laptop_mode;
  2078. count_vm_event(PAGEOUTRUN);
  2079. do {
  2080. unsigned long lru_pages = 0;
  2081. int has_under_min_watermark_zone = 0;
  2082. all_zones_ok = 1;
  2083. balanced = 0;
  2084. /*
  2085. * Scan in the highmem->dma direction for the highest
  2086. * zone which needs scanning
  2087. */
  2088. for (i = pgdat->nr_zones - 1; i >= 0; i--) {
  2089. struct zone *zone = pgdat->node_zones + i;
  2090. if (!populated_zone(zone))
  2091. continue;
  2092. if (zone->all_unreclaimable &&
  2093. sc.priority != DEF_PRIORITY)
  2094. continue;
  2095. /*
  2096. * Do some background aging of the anon list, to give
  2097. * pages a chance to be referenced before reclaiming.
  2098. */
  2099. age_active_anon(zone, &sc);
  2100. /*
  2101. * If the number of buffer_heads in the machine
  2102. * exceeds the maximum allowed level and this node
  2103. * has a highmem zone, force kswapd to reclaim from
  2104. * it to relieve lowmem pressure.
  2105. */
  2106. if (buffer_heads_over_limit && is_highmem_idx(i)) {
  2107. end_zone = i;
  2108. break;
  2109. }
  2110. if (!zone_watermark_ok_safe(zone, order,
  2111. high_wmark_pages(zone), 0, 0)) {
  2112. end_zone = i;
  2113. break;
  2114. } else {
  2115. /* If balanced, clear the congested flag */
  2116. zone_clear_flag(zone, ZONE_CONGESTED);
  2117. }
  2118. }
  2119. if (i < 0)
  2120. goto out;
  2121. for (i = 0; i <= end_zone; i++) {
  2122. struct zone *zone = pgdat->node_zones + i;
  2123. lru_pages += zone_reclaimable_pages(zone);
  2124. }
  2125. /*
  2126. * Now scan the zone in the dma->highmem direction, stopping
  2127. * at the last zone which needs scanning.
  2128. *
  2129. * We do this because the page allocator works in the opposite
  2130. * direction. This prevents the page allocator from allocating
  2131. * pages behind kswapd's direction of progress, which would
  2132. * cause too much scanning of the lower zones.
  2133. */
  2134. for (i = 0; i <= end_zone; i++) {
  2135. struct zone *zone = pgdat->node_zones + i;
  2136. int nr_slab, testorder;
  2137. unsigned long balance_gap;
  2138. if (!populated_zone(zone))
  2139. continue;
  2140. if (zone->all_unreclaimable &&
  2141. sc.priority != DEF_PRIORITY)
  2142. continue;
  2143. sc.nr_scanned = 0;
  2144. nr_soft_scanned = 0;
  2145. /*
  2146. * Call soft limit reclaim before calling shrink_zone.
  2147. */
  2148. nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
  2149. order, sc.gfp_mask,
  2150. &nr_soft_scanned);
  2151. sc.nr_reclaimed += nr_soft_reclaimed;
  2152. total_scanned += nr_soft_scanned;
  2153. /*
  2154. * We put equal pressure on every zone, unless
  2155. * one zone has way too many pages free
  2156. * already. The "too many pages" is defined
  2157. * as the high wmark plus a "gap" where the
  2158. * gap is either the low watermark or 1%
  2159. * of the zone, whichever is smaller.
  2160. */
  2161. balance_gap = min(low_wmark_pages(zone),
  2162. (zone->present_pages +
  2163. KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
  2164. KSWAPD_ZONE_BALANCE_GAP_RATIO);
  2165. /*
  2166. * Kswapd reclaims only single pages with compaction
  2167. * enabled. Trying too hard to reclaim until contiguous
  2168. * free pages have become available can hurt performance
  2169. * by evicting too much useful data from memory.
  2170. * Do not reclaim more than needed for compaction.
  2171. */
  2172. testorder = order;
  2173. if (COMPACTION_BUILD && order &&
  2174. compaction_suitable(zone, order) !=
  2175. COMPACT_SKIPPED)
  2176. testorder = 0;
  2177. if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
  2178. !zone_watermark_ok_safe(zone, testorder,
  2179. high_wmark_pages(zone) + balance_gap,
  2180. end_zone, 0)) {
  2181. shrink_zone(zone, &sc);
  2182. reclaim_state->reclaimed_slab = 0;
  2183. nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
  2184. sc.nr_reclaimed += reclaim_state->reclaimed_slab;
  2185. total_scanned += sc.nr_scanned;
  2186. if (nr_slab == 0 && !zone_reclaimable(zone))
  2187. zone->all_unreclaimable = 1;
  2188. }
  2189. /*
  2190. * If we've done a decent amount of scanning and
  2191. * the reclaim ratio is low, start doing writepage
  2192. * even in laptop mode
  2193. */
  2194. if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
  2195. total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
  2196. sc.may_writepage = 1;
  2197. if (zone->all_unreclaimable) {
  2198. if (end_zone && end_zone == i)
  2199. end_zone--;
  2200. continue;
  2201. }
  2202. if (!zone_watermark_ok_safe(zone, testorder,
  2203. high_wmark_pages(zone), end_zone, 0)) {
  2204. all_zones_ok = 0;
/*
 * We are still under the min watermark.  This
 * means GFP_ATOMIC allocations are at risk of
 * failing.  Hurry up!
 */
  2210. if (!zone_watermark_ok_safe(zone, order,
  2211. min_wmark_pages(zone), end_zone, 0))
  2212. has_under_min_watermark_zone = 1;
  2213. } else {
  2214. /*
  2215. * If a zone reaches its high watermark,
  2216. * consider it to be no longer congested. It's
  2217. * possible there are dirty pages backed by
  2218. * congested BDIs but as pressure is relieved,
  2219. * speculatively avoid congestion waits
  2220. */
  2221. zone_clear_flag(zone, ZONE_CONGESTED);
  2222. if (i <= *classzone_idx)
  2223. balanced += zone->present_pages;
  2224. }
  2225. }
  2226. if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
  2227. break; /* kswapd: all done */
  2228. /*
  2229. * OK, kswapd is getting into trouble. Take a nap, then take
  2230. * another pass across the zones.
  2231. */
  2232. if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
  2233. if (has_under_min_watermark_zone)
  2234. count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
  2235. else
  2236. congestion_wait(BLK_RW_ASYNC, HZ/10);
  2237. }
  2238. /*
  2239. * We do this so kswapd doesn't build up large priorities for
  2240. * example when it is freeing in parallel with allocators. It
  2241. * matches the direct reclaim path behaviour in terms of impact
  2242. * on zone->*_priority.
  2243. */
  2244. if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
  2245. break;
  2246. } while (--sc.priority >= 0);
  2247. out:
  2248. /*
  2249. * order-0: All zones must meet high watermark for a balanced node
  2250. * high-order: Balanced zones must make up at least 25% of the node
  2251. * for the node to be balanced
  2252. */
  2253. if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
  2254. cond_resched();
  2255. try_to_freeze();
  2256. /*
  2257. * Fragmentation may mean that the system cannot be
  2258. * rebalanced for high-order allocations in all zones.
  2259. * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
  2260. * it means the zones have been fully scanned and are still
  2261. * not balanced. For high-order allocations, there is
* little point trying all over again as kswapd may
* loop indefinitely.
  2264. *
  2265. * Instead, recheck all watermarks at order-0 as they
  2266. * are the most important. If watermarks are ok, kswapd will go
  2267. * back to sleep. High-order users can still perform direct
  2268. * reclaim if they wish.
  2269. */
  2270. if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
  2271. order = sc.order = 0;
  2272. goto loop_again;
  2273. }
  2274. /*
  2275. * If kswapd was reclaiming at a higher order, it has the option of
  2276. * sleeping without all zones being balanced. Before it does, it must
  2277. * ensure that the watermarks for order-0 on *all* zones are met and
  2278. * that the congestion flags are cleared. The congestion flag must
  2279. * be cleared as kswapd is the only mechanism that clears the flag
  2280. * and it is potentially going to sleep here.
  2281. */
  2282. if (order) {
  2283. int zones_need_compaction = 1;
  2284. for (i = 0; i <= end_zone; i++) {
  2285. struct zone *zone = pgdat->node_zones + i;
  2286. if (!populated_zone(zone))
  2287. continue;
  2288. if (zone->all_unreclaimable &&
  2289. sc.priority != DEF_PRIORITY)
  2290. continue;
  2291. /* Would compaction fail due to lack of free memory? */
  2292. if (COMPACTION_BUILD &&
  2293. compaction_suitable(zone, order) == COMPACT_SKIPPED)
  2294. goto loop_again;
  2295. /* Confirm the zone is balanced for order-0 */
  2296. if (!zone_watermark_ok(zone, 0,
  2297. high_wmark_pages(zone), 0, 0)) {
  2298. order = sc.order = 0;
  2299. goto loop_again;
  2300. }
  2301. /* Check if the memory needs to be defragmented. */
  2302. if (zone_watermark_ok(zone, order,
  2303. low_wmark_pages(zone), *classzone_idx, 0))
  2304. zones_need_compaction = 0;
  2305. /* If balanced, clear the congested flag */
  2306. zone_clear_flag(zone, ZONE_CONGESTED);
  2307. }
  2308. if (zones_need_compaction)
  2309. compact_pgdat(pgdat, order);
  2310. }
  2311. /*
  2312. * Return the order we were reclaiming at so sleeping_prematurely()
  2313. * makes a decision on the order we were last reclaiming at. However,
  2314. * if another caller entered the allocator slow path while kswapd
  2315. * was awake, order will remain at the higher level
  2316. */
  2317. *classzone_idx = end_zone;
  2318. return order;
  2319. }
  2320. static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
  2321. {
  2322. long remaining = 0;
  2323. DEFINE_WAIT(wait);
  2324. if (freezing(current) || kthread_should_stop())
  2325. return;
  2326. prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
  2327. /* Try to sleep for a short interval */
  2328. if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
  2329. remaining = schedule_timeout(HZ/10);
  2330. finish_wait(&pgdat->kswapd_wait, &wait);
  2331. prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
  2332. }
  2333. /*
  2334. * After a short sleep, check if it was a premature sleep. If not, then
  2335. * go fully to sleep until explicitly woken up.
  2336. */
  2337. if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
  2338. trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
  2339. /*
  2340. * vmstat counters are not perfectly accurate and the estimated
  2341. * value for counters such as NR_FREE_PAGES can deviate from the
  2342. * true value by nr_online_cpus * threshold. To avoid the zone
  2343. * watermarks being breached while under pressure, we reduce the
  2344. * per-cpu vmstat threshold while kswapd is awake and restore
  2345. * them before going back to sleep.
  2346. */
  2347. set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
  2348. if (!kthread_should_stop())
  2349. schedule();
  2350. set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
  2351. } else {
  2352. if (remaining)
  2353. count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
  2354. else
  2355. count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
  2356. }
  2357. finish_wait(&pgdat->kswapd_wait, &wait);
  2358. }
  2359. /*
  2360. * The background pageout daemon, started as a kernel thread
  2361. * from the init process.
  2362. *
  2363. * This basically trickles out pages so that we have _some_
  2364. * free memory available even if there is no other activity
  2365. * that frees anything up. This is needed for things like routing
  2366. * etc, where we otherwise might have all activity going on in
  2367. * asynchronous contexts that cannot page things out.
  2368. *
  2369. * If there are applications that are active memory-allocators
  2370. * (most normal use), this basically shouldn't matter.
  2371. */
  2372. static int kswapd(void *p)
  2373. {
  2374. unsigned long order, new_order;
  2375. unsigned balanced_order;
  2376. int classzone_idx, new_classzone_idx;
  2377. int balanced_classzone_idx;
  2378. pg_data_t *pgdat = (pg_data_t*)p;
  2379. struct task_struct *tsk = current;
  2380. struct reclaim_state reclaim_state = {
  2381. .reclaimed_slab = 0,
  2382. };
  2383. const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
  2384. lockdep_set_current_reclaim_state(GFP_KERNEL);
  2385. if (!cpumask_empty(cpumask))
  2386. set_cpus_allowed_ptr(tsk, cpumask);
  2387. current->reclaim_state = &reclaim_state;
  2388. /*
  2389. * Tell the memory management that we're a "memory allocator",
  2390. * and that if we need more memory we should get access to it
  2391. * regardless (see "__alloc_pages()"). "kswapd" should
  2392. * never get caught in the normal page freeing logic.
  2393. *
  2394. * (Kswapd normally doesn't need memory anyway, but sometimes
  2395. * you need a small amount of memory in order to be able to
  2396. * page out something else, and this flag essentially protects
  2397. * us from recursively trying to free more memory as we're
  2398. * trying to free the first piece of memory in the first place).
  2399. */
  2400. tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
  2401. set_freezable();
  2402. order = new_order = 0;
  2403. balanced_order = 0;
  2404. classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
  2405. balanced_classzone_idx = classzone_idx;
  2406. for ( ; ; ) {
  2407. int ret;
/*
 * If the last balance_pgdat was unsuccessful it's unlikely a
 * new request of a similar or harder type will succeed soon,
 * so consider going to sleep on the basis of the order we
 * reclaimed at.
 */
  2413. if (balanced_classzone_idx >= new_classzone_idx &&
  2414. balanced_order == new_order) {
  2415. new_order = pgdat->kswapd_max_order;
  2416. new_classzone_idx = pgdat->classzone_idx;
  2417. pgdat->kswapd_max_order = 0;
  2418. pgdat->classzone_idx = pgdat->nr_zones - 1;
  2419. }
  2420. if (order < new_order || classzone_idx > new_classzone_idx) {
/*
 * Don't sleep if someone wants a larger 'order'
 * allocation or has tighter zone constraints.
 */
  2425. order = new_order;
  2426. classzone_idx = new_classzone_idx;
  2427. } else {
  2428. kswapd_try_to_sleep(pgdat, balanced_order,
  2429. balanced_classzone_idx);
  2430. order = pgdat->kswapd_max_order;
  2431. classzone_idx = pgdat->classzone_idx;
  2432. new_order = order;
  2433. new_classzone_idx = classzone_idx;
  2434. pgdat->kswapd_max_order = 0;
  2435. pgdat->classzone_idx = pgdat->nr_zones - 1;
  2436. }
  2437. ret = try_to_freeze();
  2438. if (kthread_should_stop())
  2439. break;
  2440. /*
  2441. * We can speed up thawing tasks if we don't call balance_pgdat
  2442. * after returning from the refrigerator
  2443. */
  2444. if (!ret) {
  2445. trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
  2446. balanced_classzone_idx = classzone_idx;
  2447. balanced_order = balance_pgdat(pgdat, order,
  2448. &balanced_classzone_idx);
  2449. }
  2450. }
  2451. return 0;
  2452. }
  2453. /*
  2454. * A zone is low on free memory, so wake its kswapd task to service it.
  2455. */
  2456. void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
  2457. {
  2458. pg_data_t *pgdat;
  2459. if (!populated_zone(zone))
  2460. return;
  2461. if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
  2462. return;
  2463. pgdat = zone->zone_pgdat;
  2464. if (pgdat->kswapd_max_order < order) {
  2465. pgdat->kswapd_max_order = order;
  2466. pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
  2467. }
  2468. if (!waitqueue_active(&pgdat->kswapd_wait))
  2469. return;
  2470. if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
  2471. return;
  2472. trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
  2473. wake_up_interruptible(&pgdat->kswapd_wait);
  2474. }
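/*
 * Typical use (sketch, simplified from the allocator slow path): when a
 * zone in the zonelist drops below its low watermark, the page allocator
 * calls wakeup_kswapd(zone, order, classzone_idx) for each eligible zone
 * so that kswapd starts background reclaim before allocations are forced
 * into direct reclaim.
 */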
/*
 * The reclaimable count should be mostly accurate.
 * The less reclaimable pages may be
 * - mlocked pages, which will be moved to the unevictable list when encountered
 * - mapped pages, which may require several passes to be reclaimed
 * - dirty pages, which are not "instantly" reclaimable
 */
  2482. unsigned long global_reclaimable_pages(void)
  2483. {
unsigned long nr;
  2485. nr = global_page_state(NR_ACTIVE_FILE) +
  2486. global_page_state(NR_INACTIVE_FILE);
  2487. if (nr_swap_pages > 0)
  2488. nr += global_page_state(NR_ACTIVE_ANON) +
  2489. global_page_state(NR_INACTIVE_ANON);
  2490. return nr;
  2491. }
  2492. unsigned long zone_reclaimable_pages(struct zone *zone)
  2493. {
unsigned long nr;
  2495. nr = zone_page_state(zone, NR_ACTIVE_FILE) +
  2496. zone_page_state(zone, NR_INACTIVE_FILE);
  2497. if (nr_swap_pages > 0)
  2498. nr += zone_page_state(zone, NR_ACTIVE_ANON) +
  2499. zone_page_state(zone, NR_INACTIVE_ANON);
  2500. return nr;
  2501. }
  2502. #ifdef CONFIG_HIBERNATION
/*
 * Try to free `nr_to_reclaim' pages of memory, system-wide, and return
 * the number of freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
  2511. unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
  2512. {
  2513. struct reclaim_state reclaim_state;
  2514. struct scan_control sc = {
  2515. .gfp_mask = GFP_HIGHUSER_MOVABLE,
  2516. .may_swap = 1,
  2517. .may_unmap = 1,
  2518. .may_writepage = 1,
  2519. .nr_to_reclaim = nr_to_reclaim,
  2520. .hibernation_mode = 1,
  2521. .order = 0,
  2522. .priority = DEF_PRIORITY,
  2523. };
  2524. struct shrink_control shrink = {
  2525. .gfp_mask = sc.gfp_mask,
  2526. };
  2527. struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
  2528. struct task_struct *p = current;
  2529. unsigned long nr_reclaimed;
  2530. p->flags |= PF_MEMALLOC;
  2531. lockdep_set_current_reclaim_state(sc.gfp_mask);
  2532. reclaim_state.reclaimed_slab = 0;
  2533. p->reclaim_state = &reclaim_state;
  2534. nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
  2535. p->reclaim_state = NULL;
  2536. lockdep_clear_current_reclaim_state();
  2537. p->flags &= ~PF_MEMALLOC;
  2538. return nr_reclaimed;
  2539. }
  2540. #endif /* CONFIG_HIBERNATION */
  2541. /* It's optimal to keep kswapds on the same CPUs as their memory, but
  2542. not required for correctness. So if the last cpu in a node goes
  2543. away, we get changed to run anywhere: as the first one comes back,
  2544. restore their cpu bindings. */
  2545. static int __devinit cpu_callback(struct notifier_block *nfb,
  2546. unsigned long action, void *hcpu)
  2547. {
  2548. int nid;
  2549. if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
  2550. for_each_node_state(nid, N_HIGH_MEMORY) {
  2551. pg_data_t *pgdat = NODE_DATA(nid);
  2552. const struct cpumask *mask;
  2553. mask = cpumask_of_node(pgdat->node_id);
  2554. if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
  2555. /* One of our CPUs online: restore mask */
  2556. set_cpus_allowed_ptr(pgdat->kswapd, mask);
  2557. }
  2558. }
  2559. return NOTIFY_OK;
  2560. }
/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
  2565. int kswapd_run(int nid)
  2566. {
  2567. pg_data_t *pgdat = NODE_DATA(nid);
  2568. int ret = 0;
  2569. if (pgdat->kswapd)
  2570. return 0;
  2571. pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
  2572. if (IS_ERR(pgdat->kswapd)) {
  2573. /* failure at boot is fatal */
  2574. BUG_ON(system_state == SYSTEM_BOOTING);
printk("Failed to start kswapd on node %d\n", nid);
  2576. ret = -1;
  2577. }
  2578. return ret;
  2579. }
  2580. /*
  2581. * Called by memory hotplug when all memory in a node is offlined. Caller must
  2582. * hold lock_memory_hotplug().
  2583. */
  2584. void kswapd_stop(int nid)
  2585. {
  2586. struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
  2587. if (kswapd) {
  2588. kthread_stop(kswapd);
  2589. NODE_DATA(nid)->kswapd = NULL;
  2590. }
  2591. }
  2592. static int __init kswapd_init(void)
  2593. {
  2594. int nid;
  2595. swap_setup();
  2596. for_each_node_state(nid, N_HIGH_MEMORY)
  2597. kswapd_run(nid);
  2598. hotcpu_notifier(cpu_callback, 0);
  2599. return 0;
  2600. }
  2601. module_init(kswapd_init)
  2602. #ifdef CONFIG_NUMA
  2603. /*
  2604. * Zone reclaim mode
  2605. *
  2606. * If non-zero call zone_reclaim when the number of free pages falls below
  2607. * the watermarks.
  2608. */
  2609. int zone_reclaim_mode __read_mostly;
  2610. #define RECLAIM_OFF 0
  2611. #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
  2612. #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
  2613. #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
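/*
 * These bits are set from user space via the vm.zone_reclaim_mode sysctl,
 * e.g. "echo 1 > /proc/sys/vm/zone_reclaim_mode" enables plain zone
 * reclaim, while a value of 3 (RECLAIM_ZONE | RECLAIM_WRITE) additionally
 * allows dirty pages to be written out during reclaim.
 */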
  2614. /*
  2615. * Priority for ZONE_RECLAIM. This determines the fraction of pages
  2616. * of a node considered for each zone_reclaim. 4 scans 1/16th of
  2617. * a zone.
  2618. */
  2619. #define ZONE_RECLAIM_PRIORITY 4
  2620. /*
  2621. * Percentage of pages in a zone that must be unmapped for zone_reclaim to
  2622. * occur.
  2623. */
  2624. int sysctl_min_unmapped_ratio = 1;
  2625. /*
  2626. * If the number of slab pages in a zone grows beyond this percentage then
  2627. * slab reclaim needs to occur.
  2628. */
  2629. int sysctl_min_slab_ratio = 5;
  2630. static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
  2631. {
  2632. unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
  2633. unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
  2634. zone_page_state(zone, NR_ACTIVE_FILE);
  2635. /*
  2636. * It's possible for there to be more file mapped pages than
  2637. * accounted for by the pages on the file LRU lists because
  2638. * tmpfs pages accounted for as ANON can also be FILE_MAPPED
  2639. */
  2640. return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
  2641. }
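/*
 * Example with illustrative numbers: 50000 pages on the file LRU lists
 * and 20000 FILE_MAPPED pages gives 30000 unmapped file pages for
 * zone_reclaim to consider; if mapped pages outnumber the file LRU
 * (e.g. because mapped tmpfs pages are accounted as anon), the result
 * is 0.
 */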
  2642. /* Work out how many page cache pages we can reclaim in this reclaim_mode */
  2643. static long zone_pagecache_reclaimable(struct zone *zone)
  2644. {
  2645. long nr_pagecache_reclaimable;
  2646. long delta = 0;
/*
 * If RECLAIM_SWAP is set, then all file pages are considered
 * potentially reclaimable.  Otherwise, we have to worry about
 * pages like swapcache, and zone_unmapped_file_pages() provides
 * a better estimate.
 */
  2653. if (zone_reclaim_mode & RECLAIM_SWAP)
  2654. nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
  2655. else
  2656. nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
  2657. /* If we can't clean pages, remove dirty pages from consideration */
  2658. if (!(zone_reclaim_mode & RECLAIM_WRITE))
  2659. delta += zone_page_state(zone, NR_FILE_DIRTY);
  2660. /* Watch for any possible underflows due to delta */
  2661. if (unlikely(delta > nr_pagecache_reclaimable))
  2662. delta = nr_pagecache_reclaimable;
  2663. return nr_pagecache_reclaimable - delta;
  2664. }
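/*
 * For example, with zone_reclaim_mode == RECLAIM_ZONE (neither RECLAIM_WRITE
 * nor RECLAIM_SWAP set), the estimate above is the unmapped file LRU pages
 * minus the dirty file pages, i.e. only clean, unmapped page cache counts as
 * cheaply reclaimable.
 */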
/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.may_swap = 1,
		.nr_to_reclaim = max_t(unsigned long, nr_pages,
				       SWAP_CLUSTER_MAX),
		.gfp_mask = gfp_mask,
		.order = order,
		.priority = ZONE_RECLAIM_PRIORITY,
	};
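	/*
	 * Note that sc.priority starts at ZONE_RECLAIM_PRIORITY (4) rather
	 * than DEF_PRIORITY (12): the first pass already considers about
	 * 1/16 of the zone, and the loop below makes at most five passes
	 * (priority 4 down to 0) before giving up.
	 */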
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};
	unsigned long nr_slab_pages0, nr_slab_pages1;

	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink zone with increasing
		 * priorities until we have enough memory freed.
		 */
		do {
			shrink_zone(zone, &sc);
		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
	}
	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	if (nr_slab_pages0 > zone->min_slab_pages) {
		/*
		 * shrink_slab() does not currently allow us to determine how
		 * many pages were freed in this zone. So we take the current
		 * number of slab pages and shake the slab until it is reduced
		 * by the same nr_pages that we used for reclaiming unmapped
		 * pages.
		 *
		 * Note that shrink_slab will free memory on all zones and may
		 * take a long time.
		 */
		for (;;) {
			unsigned long lru_pages = zone_reclaimable_pages(zone);

			/* No reclaimable slab or very low memory pressure */
			if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
				break;

			/* Freed enough memory */
			nr_slab_pages1 = zone_page_state(zone,
							NR_SLAB_RECLAIMABLE);
			if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
				break;
		}

		/*
		 * Update nr_reclaimed by the number of slab pages we
		 * reclaimed from this zone.
		 */
		nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
		if (nr_slab_pages1 < nr_slab_pages0)
			sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
	lockdep_clear_current_reclaim_state();
	return sc.nr_reclaimed >= nr_pages;
}
int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int node_id;
	int ret;

	/*
	 * Zone reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the zone is overallocated. So we do not reclaim
	 * if less than a specified percentage of the zone is used by
	 * unmapped file backed pages.
	 */
	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
		return ZONE_RECLAIM_FULL;

	if (zone->all_unreclaimable)
		return ZONE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
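	/*
	 * Callers without __GFP_WAIT (e.g. GFP_ATOMIC) must not sleep here,
	 * and a PF_MEMALLOC caller is already inside reclaim, so entering
	 * zone reclaim again would risk recursion.
	 */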
	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
		return ZONE_RECLAIM_NOSCAN;

	/*
	 * Only run zone reclaim on the local zone or on zones that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	node_id = zone_to_nid(zone);
	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
		return ZONE_RECLAIM_NOSCAN;
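	/*
	 * ZONE_RECLAIM_LOCKED serializes zone reclaim per zone: if another
	 * task is already reclaiming this zone, back off rather than piling
	 * up scanners on the same zone.
	 */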
	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return ZONE_RECLAIM_NOSCAN;

	ret = __zone_reclaim(zone, gfp_mask, order);
	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
#endif
/**
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 * @vma: the VMA in which the page is or will be mapped, may be NULL
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list. The vma argument is !NULL when called from the
 * fault path to determine how to instantiate a new page.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
int page_evictable(struct page *page, struct vm_area_struct *vma)
{
	if (mapping_unevictable(page_mapping(page)))
		return 0;

	if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
		return 0;

	return 1;
}
#ifdef CONFIG_SHMEM
/**
 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
 * @pages:	array of pages to check
 * @nr_pages:	number of pages to check
 *
 * Checks pages for evictability and moves them to the appropriate lru list.
 *
 * This function is only used for SysV IPC SHM_UNLOCK.
 */
void check_move_unevictable_pages(struct page **pages, int nr_pages)
{
	struct lruvec *lruvec;
	struct zone *zone = NULL;
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pages[i];
		struct zone *pagezone;

		pgscanned++;
		pagezone = page_zone(page);
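		/*
		 * The pages in the array may belong to different zones; take
		 * each zone's lru_lock only while handling that zone's pages
		 * and switch locks whenever the zone changes.
		 */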
		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		lruvec = mem_cgroup_page_lruvec(page, zone);

		if (!PageLRU(page) || !PageUnevictable(page))
			continue;

		if (page_evictable(page, NULL)) {
			enum lru_list lru = page_lru_base_type(page);

			VM_BUG_ON(PageActive(page));
			ClearPageUnevictable(page);
			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
			add_page_to_lru_list(page, lruvec, lru);
			pgrescued++;
		}
	}

	if (zone) {
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		spin_unlock_irq(&zone->lru_lock);
	}
}
#endif /* CONFIG_SHMEM */
static void warn_scan_unevictable_pages(void)
{
	printk_once(KERN_WARNING
		    "%s: The scan_unevictable_pages sysctl/node-interface has been "
		    "disabled for lack of a legitimate use case. If you have "
		    "one, please send an email to linux-mm@kvack.org.\n",
		    current->comm);
}
/*
 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
 * all nodes' unevictable lists for evictable pages
 */
unsigned long scan_unevictable_pages;
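/*
 * The handler below is effectively a no-op kept so existing users of the
 * sysctl do not break: it parses the write, emits the deprecation warning
 * above and resets the value to zero without scanning anything.
 */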
int scan_unevictable_handler(struct ctl_table *table, int write,
			     void __user *buffer,
			     size_t *length, loff_t *ppos)
{
	warn_scan_unevictable_pages();
	proc_doulongvec_minmax(table, write, buffer, length, ppos);
	scan_unevictable_pages = 0;
	return 0;
}
#ifdef CONFIG_NUMA
/*
 * per node 'scan_unevictable_pages' attribute. On demand re-scan of
 * a specified node's per zone unevictable lists for evictable pages.
 */
static ssize_t read_scan_unevictable_node(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	warn_scan_unevictable_pages();
	return sprintf(buf, "0\n");	/* always zero; should fit... */
}

static ssize_t write_scan_unevictable_node(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	warn_scan_unevictable_pages();
	return 1;
}

static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
		   read_scan_unevictable_node,
		   write_scan_unevictable_node);
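/*
 * The attribute above appears per node as
 * /sys/devices/system/node/nodeN/scan_unevictable_pages; like the sysctl, it
 * only emits the deprecation warning and reports zero.
 */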
int scan_unevictable_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
}

void scan_unevictable_unregister_node(struct node *node)
{
	device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
}
#endif