vmscan.c 79 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862
  1. /*
  2. * linux/mm/vmscan.c
  3. *
  4. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  5. *
  6. * Swap reorganised 29.12.95, Stephen Tweedie.
  7. * kswapd added: 7.1.96 sct
  8. * Removed kswapd_ctl limits, and swap out as many pages as needed
  9. * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
  10. * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
  11. * Multiqueue VM started 5.8.00, Rik van Riel.
  12. */
  13. #include <linux/mm.h>
  14. #include <linux/module.h>
  15. #include <linux/slab.h>
  16. #include <linux/kernel_stat.h>
  17. #include <linux/swap.h>
  18. #include <linux/pagemap.h>
  19. #include <linux/init.h>
  20. #include <linux/highmem.h>
  21. #include <linux/vmstat.h>
  22. #include <linux/file.h>
  23. #include <linux/writeback.h>
  24. #include <linux/blkdev.h>
  25. #include <linux/buffer_head.h> /* for try_to_release_page(),
  26. buffer_heads_over_limit */
  27. #include <linux/mm_inline.h>
  28. #include <linux/pagevec.h>
  29. #include <linux/backing-dev.h>
  30. #include <linux/rmap.h>
  31. #include <linux/topology.h>
  32. #include <linux/cpu.h>
  33. #include <linux/cpuset.h>
  34. #include <linux/notifier.h>
  35. #include <linux/rwsem.h>
  36. #include <linux/delay.h>
  37. #include <linux/kthread.h>
  38. #include <linux/freezer.h>
  39. #include <linux/memcontrol.h>
  40. #include <linux/delayacct.h>
  41. #include <linux/sysctl.h>
  42. #include <asm/tlbflush.h>
  43. #include <asm/div64.h>
  44. #include <linux/swapops.h>
  45. #include "internal.h"
  46. struct scan_control {
  47. /* Incremented by the number of inactive pages that were scanned */
  48. unsigned long nr_scanned;
  49. /* Number of pages freed so far during a call to shrink_zones() */
  50. unsigned long nr_reclaimed;
  51. /* This context's GFP mask */
  52. gfp_t gfp_mask;
  53. int may_writepage;
  54. /* Can mapped pages be reclaimed? */
  55. int may_unmap;
  56. /* Can pages be swapped as part of reclaim? */
  57. int may_swap;
  58. /* This context's SWAP_CLUSTER_MAX. If freeing memory for
  59. * suspend, we effectively ignore SWAP_CLUSTER_MAX.
  60. * In this context, it doesn't matter that we scan the
  61. * whole list at once. */
  62. int swap_cluster_max;
  63. int swappiness;
  64. int all_unreclaimable;
  65. int order;
  66. /* Which cgroup do we reclaim from */
  67. struct mem_cgroup *mem_cgroup;
  68. /*
  69. * Nodemask of nodes allowed by the caller. If NULL, all nodes
  70. * are scanned.
  71. */
  72. nodemask_t *nodemask;
  73. /* Pluggable isolate pages callback */
  74. unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
  75. unsigned long *scanned, int order, int mode,
  76. struct zone *z, struct mem_cgroup *mem_cont,
  77. int active, int file);
  78. };
  79. #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
  80. #ifdef ARCH_HAS_PREFETCH
  81. #define prefetch_prev_lru_page(_page, _base, _field) \
  82. do { \
  83. if ((_page)->lru.prev != _base) { \
  84. struct page *prev; \
  85. \
  86. prev = lru_to_page(&(_page->lru)); \
  87. prefetch(&prev->_field); \
  88. } \
  89. } while (0)
  90. #else
  91. #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
  92. #endif
  93. #ifdef ARCH_HAS_PREFETCHW
  94. #define prefetchw_prev_lru_page(_page, _base, _field) \
  95. do { \
  96. if ((_page)->lru.prev != _base) { \
  97. struct page *prev; \
  98. \
  99. prev = lru_to_page(&(_page->lru)); \
  100. prefetchw(&prev->_field); \
  101. } \
  102. } while (0)
  103. #else
  104. #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
  105. #endif
  106. /*
  107. * From 0 .. 100. Higher means more swappy.
  108. */
  109. int vm_swappiness = 60;
  110. long vm_total_pages; /* The total number of pages which the VM controls */
  111. static LIST_HEAD(shrinker_list);
  112. static DECLARE_RWSEM(shrinker_rwsem);
  113. #ifdef CONFIG_CGROUP_MEM_RES_CTLR
  114. #define scanning_global_lru(sc) (!(sc)->mem_cgroup)
  115. #else
  116. #define scanning_global_lru(sc) (1)
  117. #endif
  118. static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
  119. struct scan_control *sc)
  120. {
  121. if (!scanning_global_lru(sc))
  122. return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
  123. return &zone->reclaim_stat;
  124. }
  125. static unsigned long zone_nr_lru_pages(struct zone *zone,
  126. struct scan_control *sc, enum lru_list lru)
  127. {
  128. if (!scanning_global_lru(sc))
  129. return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
  130. return zone_page_state(zone, NR_LRU_BASE + lru);
  131. }
  132. /*
  133. * Add a shrinker callback to be called from the vm
  134. */
  135. void register_shrinker(struct shrinker *shrinker)
  136. {
  137. shrinker->nr = 0;
  138. down_write(&shrinker_rwsem);
  139. list_add_tail(&shrinker->list, &shrinker_list);
  140. up_write(&shrinker_rwsem);
  141. }
  142. EXPORT_SYMBOL(register_shrinker);
  143. /*
  144. * Remove one
  145. */
  146. void unregister_shrinker(struct shrinker *shrinker)
  147. {
  148. down_write(&shrinker_rwsem);
  149. list_del(&shrinker->list);
  150. up_write(&shrinker_rwsem);
  151. }
  152. EXPORT_SYMBOL(unregister_shrinker);
  153. #define SHRINK_BATCH 128
  154. /*
  155. * Call the shrink functions to age shrinkable caches
  156. *
  157. * Here we assume it costs one seek to replace a lru page and that it also
  158. * takes a seek to recreate a cache object. With this in mind we age equal
  159. * percentages of the lru and ageable caches. This should balance the seeks
  160. * generated by these structures.
  161. *
  162. * If the vm encountered mapped pages on the LRU it increase the pressure on
  163. * slab to avoid swapping.
  164. *
  165. * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
  166. *
  167. * `lru_pages' represents the number of on-LRU pages in all the zones which
  168. * are eligible for the caller's allocation attempt. It is used for balancing
  169. * slab reclaim versus page reclaim.
  170. *
  171. * Returns the number of slab objects which we shrunk.
  172. */
  173. unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
  174. unsigned long lru_pages)
  175. {
  176. struct shrinker *shrinker;
  177. unsigned long ret = 0;
  178. if (scanned == 0)
  179. scanned = SWAP_CLUSTER_MAX;
  180. if (!down_read_trylock(&shrinker_rwsem))
  181. return 1; /* Assume we'll be able to shrink next time */
  182. list_for_each_entry(shrinker, &shrinker_list, list) {
  183. unsigned long long delta;
  184. unsigned long total_scan;
  185. unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
  186. delta = (4 * scanned) / shrinker->seeks;
  187. delta *= max_pass;
  188. do_div(delta, lru_pages + 1);
  189. shrinker->nr += delta;
  190. if (shrinker->nr < 0) {
  191. printk(KERN_ERR "shrink_slab: %pF negative objects to "
  192. "delete nr=%ld\n",
  193. shrinker->shrink, shrinker->nr);
  194. shrinker->nr = max_pass;
  195. }
  196. /*
  197. * Avoid risking looping forever due to too large nr value:
  198. * never try to free more than twice the estimate number of
  199. * freeable entries.
  200. */
  201. if (shrinker->nr > max_pass * 2)
  202. shrinker->nr = max_pass * 2;
  203. total_scan = shrinker->nr;
  204. shrinker->nr = 0;
  205. while (total_scan >= SHRINK_BATCH) {
  206. long this_scan = SHRINK_BATCH;
  207. int shrink_ret;
  208. int nr_before;
  209. nr_before = (*shrinker->shrink)(0, gfp_mask);
  210. shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
  211. if (shrink_ret == -1)
  212. break;
  213. if (shrink_ret < nr_before)
  214. ret += nr_before - shrink_ret;
  215. count_vm_events(SLABS_SCANNED, this_scan);
  216. total_scan -= this_scan;
  217. cond_resched();
  218. }
  219. shrinker->nr += total_scan;
  220. }
  221. up_read(&shrinker_rwsem);
  222. return ret;
  223. }
  224. /* Called without lock on whether page is mapped, so answer is unstable */
  225. static inline int page_mapping_inuse(struct page *page)
  226. {
  227. struct address_space *mapping;
  228. /* Page is in somebody's page tables. */
  229. if (page_mapped(page))
  230. return 1;
  231. /* Be more reluctant to reclaim swapcache than pagecache */
  232. if (PageSwapCache(page))
  233. return 1;
  234. mapping = page_mapping(page);
  235. if (!mapping)
  236. return 0;
  237. /* File is mmap'd by somebody? */
  238. return mapping_mapped(mapping);
  239. }
  240. static inline int is_page_cache_freeable(struct page *page)
  241. {
  242. /*
  243. * A freeable page cache page is referenced only by the caller
  244. * that isolated the page, the page cache radix tree and
  245. * optional buffer heads at page->private.
  246. */
  247. return page_count(page) - page_has_private(page) == 2;
  248. }
  249. static int may_write_to_queue(struct backing_dev_info *bdi)
  250. {
  251. if (current->flags & PF_SWAPWRITE)
  252. return 1;
  253. if (!bdi_write_congested(bdi))
  254. return 1;
  255. if (bdi == current->backing_dev_info)
  256. return 1;
  257. return 0;
  258. }
  259. /*
  260. * We detected a synchronous write error writing a page out. Probably
  261. * -ENOSPC. We need to propagate that into the address_space for a subsequent
  262. * fsync(), msync() or close().
  263. *
  264. * The tricky part is that after writepage we cannot touch the mapping: nothing
  265. * prevents it from being freed up. But we have a ref on the page and once
  266. * that page is locked, the mapping is pinned.
  267. *
  268. * We're allowed to run sleeping lock_page() here because we know the caller has
  269. * __GFP_FS.
  270. */
  271. static void handle_write_error(struct address_space *mapping,
  272. struct page *page, int error)
  273. {
  274. lock_page(page);
  275. if (page_mapping(page) == mapping)
  276. mapping_set_error(mapping, error);
  277. unlock_page(page);
  278. }
  279. /* Request for sync pageout. */
  280. enum pageout_io {
  281. PAGEOUT_IO_ASYNC,
  282. PAGEOUT_IO_SYNC,
  283. };
  284. /* possible outcome of pageout() */
  285. typedef enum {
  286. /* failed to write page out, page is locked */
  287. PAGE_KEEP,
  288. /* move page to the active list, page is locked */
  289. PAGE_ACTIVATE,
  290. /* page has been sent to the disk successfully, page is unlocked */
  291. PAGE_SUCCESS,
  292. /* page is clean and locked */
  293. PAGE_CLEAN,
  294. } pageout_t;
  295. /*
  296. * pageout is called by shrink_page_list() for each dirty page.
  297. * Calls ->writepage().
  298. */
  299. static pageout_t pageout(struct page *page, struct address_space *mapping,
  300. enum pageout_io sync_writeback)
  301. {
  302. /*
  303. * If the page is dirty, only perform writeback if that write
  304. * will be non-blocking. To prevent this allocation from being
  305. * stalled by pagecache activity. But note that there may be
  306. * stalls if we need to run get_block(). We could test
  307. * PagePrivate for that.
  308. *
  309. * If this process is currently in generic_file_write() against
  310. * this page's queue, we can perform writeback even if that
  311. * will block.
  312. *
  313. * If the page is swapcache, write it back even if that would
  314. * block, for some throttling. This happens by accident, because
  315. * swap_backing_dev_info is bust: it doesn't reflect the
  316. * congestion state of the swapdevs. Easy to fix, if needed.
  317. */
  318. if (!is_page_cache_freeable(page))
  319. return PAGE_KEEP;
  320. if (!mapping) {
  321. /*
  322. * Some data journaling orphaned pages can have
  323. * page->mapping == NULL while being dirty with clean buffers.
  324. */
  325. if (page_has_private(page)) {
  326. if (try_to_free_buffers(page)) {
  327. ClearPageDirty(page);
  328. printk("%s: orphaned page\n", __func__);
  329. return PAGE_CLEAN;
  330. }
  331. }
  332. return PAGE_KEEP;
  333. }
  334. if (mapping->a_ops->writepage == NULL)
  335. return PAGE_ACTIVATE;
  336. if (!may_write_to_queue(mapping->backing_dev_info))
  337. return PAGE_KEEP;
  338. if (clear_page_dirty_for_io(page)) {
  339. int res;
  340. struct writeback_control wbc = {
  341. .sync_mode = WB_SYNC_NONE,
  342. .nr_to_write = SWAP_CLUSTER_MAX,
  343. .range_start = 0,
  344. .range_end = LLONG_MAX,
  345. .nonblocking = 1,
  346. .for_reclaim = 1,
  347. };
  348. SetPageReclaim(page);
  349. res = mapping->a_ops->writepage(page, &wbc);
  350. if (res < 0)
  351. handle_write_error(mapping, page, res);
  352. if (res == AOP_WRITEPAGE_ACTIVATE) {
  353. ClearPageReclaim(page);
  354. return PAGE_ACTIVATE;
  355. }
  356. /*
  357. * Wait on writeback if requested to. This happens when
  358. * direct reclaiming a large contiguous area and the
  359. * first attempt to free a range of pages fails.
  360. */
  361. if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
  362. wait_on_page_writeback(page);
  363. if (!PageWriteback(page)) {
  364. /* synchronous write or broken a_ops? */
  365. ClearPageReclaim(page);
  366. }
  367. inc_zone_page_state(page, NR_VMSCAN_WRITE);
  368. return PAGE_SUCCESS;
  369. }
  370. return PAGE_CLEAN;
  371. }
  372. /*
  373. * Same as remove_mapping, but if the page is removed from the mapping, it
  374. * gets returned with a refcount of 0.
  375. */
  376. static int __remove_mapping(struct address_space *mapping, struct page *page)
  377. {
  378. BUG_ON(!PageLocked(page));
  379. BUG_ON(mapping != page_mapping(page));
  380. spin_lock_irq(&mapping->tree_lock);
  381. /*
  382. * The non racy check for a busy page.
  383. *
  384. * Must be careful with the order of the tests. When someone has
  385. * a ref to the page, it may be possible that they dirty it then
  386. * drop the reference. So if PageDirty is tested before page_count
  387. * here, then the following race may occur:
  388. *
  389. * get_user_pages(&page);
  390. * [user mapping goes away]
  391. * write_to(page);
  392. * !PageDirty(page) [good]
  393. * SetPageDirty(page);
  394. * put_page(page);
  395. * !page_count(page) [good, discard it]
  396. *
  397. * [oops, our write_to data is lost]
  398. *
  399. * Reversing the order of the tests ensures such a situation cannot
  400. * escape unnoticed. The smp_rmb is needed to ensure the page->flags
  401. * load is not satisfied before that of page->_count.
  402. *
  403. * Note that if SetPageDirty is always performed via set_page_dirty,
  404. * and thus under tree_lock, then this ordering is not required.
  405. */
  406. if (!page_freeze_refs(page, 2))
  407. goto cannot_free;
  408. /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
  409. if (unlikely(PageDirty(page))) {
  410. page_unfreeze_refs(page, 2);
  411. goto cannot_free;
  412. }
  413. if (PageSwapCache(page)) {
  414. swp_entry_t swap = { .val = page_private(page) };
  415. __delete_from_swap_cache(page);
  416. spin_unlock_irq(&mapping->tree_lock);
  417. swapcache_free(swap, page);
  418. } else {
  419. __remove_from_page_cache(page);
  420. spin_unlock_irq(&mapping->tree_lock);
  421. mem_cgroup_uncharge_cache_page(page);
  422. }
  423. return 1;
  424. cannot_free:
  425. spin_unlock_irq(&mapping->tree_lock);
  426. return 0;
  427. }
  428. /*
  429. * Attempt to detach a locked page from its ->mapping. If it is dirty or if
  430. * someone else has a ref on the page, abort and return 0. If it was
  431. * successfully detached, return 1. Assumes the caller has a single ref on
  432. * this page.
  433. */
  434. int remove_mapping(struct address_space *mapping, struct page *page)
  435. {
  436. if (__remove_mapping(mapping, page)) {
  437. /*
  438. * Unfreezing the refcount with 1 rather than 2 effectively
  439. * drops the pagecache ref for us without requiring another
  440. * atomic operation.
  441. */
  442. page_unfreeze_refs(page, 1);
  443. return 1;
  444. }
  445. return 0;
  446. }
  447. /**
  448. * putback_lru_page - put previously isolated page onto appropriate LRU list
  449. * @page: page to be put back to appropriate lru list
  450. *
  451. * Add previously isolated @page to appropriate LRU list.
  452. * Page may still be unevictable for other reasons.
  453. *
  454. * lru_lock must not be held, interrupts must be enabled.
  455. */
  456. void putback_lru_page(struct page *page)
  457. {
  458. int lru;
  459. int active = !!TestClearPageActive(page);
  460. int was_unevictable = PageUnevictable(page);
  461. VM_BUG_ON(PageLRU(page));
  462. redo:
  463. ClearPageUnevictable(page);
  464. if (page_evictable(page, NULL)) {
  465. /*
  466. * For evictable pages, we can use the cache.
  467. * In event of a race, worst case is we end up with an
  468. * unevictable page on [in]active list.
  469. * We know how to handle that.
  470. */
  471. lru = active + page_lru_base_type(page);
  472. lru_cache_add_lru(page, lru);
  473. } else {
  474. /*
  475. * Put unevictable pages directly on zone's unevictable
  476. * list.
  477. */
  478. lru = LRU_UNEVICTABLE;
  479. add_page_to_unevictable_list(page);
  480. }
  481. /*
  482. * page's status can change while we move it among lru. If an evictable
  483. * page is on unevictable list, it never be freed. To avoid that,
  484. * check after we added it to the list, again.
  485. */
  486. if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
  487. if (!isolate_lru_page(page)) {
  488. put_page(page);
  489. goto redo;
  490. }
  491. /* This means someone else dropped this page from LRU
  492. * So, it will be freed or putback to LRU again. There is
  493. * nothing to do here.
  494. */
  495. }
  496. if (was_unevictable && lru != LRU_UNEVICTABLE)
  497. count_vm_event(UNEVICTABLE_PGRESCUED);
  498. else if (!was_unevictable && lru == LRU_UNEVICTABLE)
  499. count_vm_event(UNEVICTABLE_PGCULLED);
  500. put_page(page); /* drop ref from isolate */
  501. }
  502. /*
  503. * shrink_page_list() returns the number of reclaimed pages
  504. */
  505. static unsigned long shrink_page_list(struct list_head *page_list,
  506. struct scan_control *sc,
  507. enum pageout_io sync_writeback)
  508. {
  509. LIST_HEAD(ret_pages);
  510. struct pagevec freed_pvec;
  511. int pgactivate = 0;
  512. unsigned long nr_reclaimed = 0;
  513. unsigned long vm_flags;
  514. cond_resched();
  515. pagevec_init(&freed_pvec, 1);
  516. while (!list_empty(page_list)) {
  517. struct address_space *mapping;
  518. struct page *page;
  519. int may_enter_fs;
  520. int referenced;
  521. cond_resched();
  522. page = lru_to_page(page_list);
  523. list_del(&page->lru);
  524. if (!trylock_page(page))
  525. goto keep;
  526. VM_BUG_ON(PageActive(page));
  527. sc->nr_scanned++;
  528. if (unlikely(!page_evictable(page, NULL)))
  529. goto cull_mlocked;
  530. if (!sc->may_unmap && page_mapped(page))
  531. goto keep_locked;
  532. /* Double the slab pressure for mapped and swapcache pages */
  533. if (page_mapped(page) || PageSwapCache(page))
  534. sc->nr_scanned++;
  535. may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
  536. (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
  537. if (PageWriteback(page)) {
  538. /*
  539. * Synchronous reclaim is performed in two passes,
  540. * first an asynchronous pass over the list to
  541. * start parallel writeback, and a second synchronous
  542. * pass to wait for the IO to complete. Wait here
  543. * for any page for which writeback has already
  544. * started.
  545. */
  546. if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
  547. wait_on_page_writeback(page);
  548. else
  549. goto keep_locked;
  550. }
  551. referenced = page_referenced(page, 1,
  552. sc->mem_cgroup, &vm_flags);
  553. /*
  554. * In active use or really unfreeable? Activate it.
  555. * If page which have PG_mlocked lost isoltation race,
  556. * try_to_unmap moves it to unevictable list
  557. */
  558. if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
  559. referenced && page_mapping_inuse(page)
  560. && !(vm_flags & VM_LOCKED))
  561. goto activate_locked;
  562. /*
  563. * Anonymous process memory has backing store?
  564. * Try to allocate it some swap space here.
  565. */
  566. if (PageAnon(page) && !PageSwapCache(page)) {
  567. if (!(sc->gfp_mask & __GFP_IO))
  568. goto keep_locked;
  569. if (!add_to_swap(page))
  570. goto activate_locked;
  571. may_enter_fs = 1;
  572. }
  573. mapping = page_mapping(page);
  574. /*
  575. * The page is mapped into the page tables of one or more
  576. * processes. Try to unmap it here.
  577. */
  578. if (page_mapped(page) && mapping) {
  579. switch (try_to_unmap(page, 0)) {
  580. case SWAP_FAIL:
  581. goto activate_locked;
  582. case SWAP_AGAIN:
  583. goto keep_locked;
  584. case SWAP_MLOCK:
  585. goto cull_mlocked;
  586. case SWAP_SUCCESS:
  587. ; /* try to free the page below */
  588. }
  589. }
  590. if (PageDirty(page)) {
  591. if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
  592. goto keep_locked;
  593. if (!may_enter_fs)
  594. goto keep_locked;
  595. if (!sc->may_writepage)
  596. goto keep_locked;
  597. /* Page is dirty, try to write it out here */
  598. switch (pageout(page, mapping, sync_writeback)) {
  599. case PAGE_KEEP:
  600. goto keep_locked;
  601. case PAGE_ACTIVATE:
  602. goto activate_locked;
  603. case PAGE_SUCCESS:
  604. if (PageWriteback(page) || PageDirty(page))
  605. goto keep;
  606. /*
  607. * A synchronous write - probably a ramdisk. Go
  608. * ahead and try to reclaim the page.
  609. */
  610. if (!trylock_page(page))
  611. goto keep;
  612. if (PageDirty(page) || PageWriteback(page))
  613. goto keep_locked;
  614. mapping = page_mapping(page);
  615. case PAGE_CLEAN:
  616. ; /* try to free the page below */
  617. }
  618. }
  619. /*
  620. * If the page has buffers, try to free the buffer mappings
  621. * associated with this page. If we succeed we try to free
  622. * the page as well.
  623. *
  624. * We do this even if the page is PageDirty().
  625. * try_to_release_page() does not perform I/O, but it is
  626. * possible for a page to have PageDirty set, but it is actually
  627. * clean (all its buffers are clean). This happens if the
  628. * buffers were written out directly, with submit_bh(). ext3
  629. * will do this, as well as the blockdev mapping.
  630. * try_to_release_page() will discover that cleanness and will
  631. * drop the buffers and mark the page clean - it can be freed.
  632. *
  633. * Rarely, pages can have buffers and no ->mapping. These are
  634. * the pages which were not successfully invalidated in
  635. * truncate_complete_page(). We try to drop those buffers here
  636. * and if that worked, and the page is no longer mapped into
  637. * process address space (page_count == 1) it can be freed.
  638. * Otherwise, leave the page on the LRU so it is swappable.
  639. */
  640. if (page_has_private(page)) {
  641. if (!try_to_release_page(page, sc->gfp_mask))
  642. goto activate_locked;
  643. if (!mapping && page_count(page) == 1) {
  644. unlock_page(page);
  645. if (put_page_testzero(page))
  646. goto free_it;
  647. else {
  648. /*
  649. * rare race with speculative reference.
  650. * the speculative reference will free
  651. * this page shortly, so we may
  652. * increment nr_reclaimed here (and
  653. * leave it off the LRU).
  654. */
  655. nr_reclaimed++;
  656. continue;
  657. }
  658. }
  659. }
  660. if (!mapping || !__remove_mapping(mapping, page))
  661. goto keep_locked;
  662. /*
  663. * At this point, we have no other references and there is
  664. * no way to pick any more up (removed from LRU, removed
  665. * from pagecache). Can use non-atomic bitops now (and
  666. * we obviously don't have to worry about waking up a process
  667. * waiting on the page lock, because there are no references.
  668. */
  669. __clear_page_locked(page);
  670. free_it:
  671. nr_reclaimed++;
  672. if (!pagevec_add(&freed_pvec, page)) {
  673. __pagevec_free(&freed_pvec);
  674. pagevec_reinit(&freed_pvec);
  675. }
  676. continue;
  677. cull_mlocked:
  678. if (PageSwapCache(page))
  679. try_to_free_swap(page);
  680. unlock_page(page);
  681. putback_lru_page(page);
  682. continue;
  683. activate_locked:
  684. /* Not a candidate for swapping, so reclaim swap space. */
  685. if (PageSwapCache(page) && vm_swap_full())
  686. try_to_free_swap(page);
  687. VM_BUG_ON(PageActive(page));
  688. SetPageActive(page);
  689. pgactivate++;
  690. keep_locked:
  691. unlock_page(page);
  692. keep:
  693. list_add(&page->lru, &ret_pages);
  694. VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
  695. }
  696. list_splice(&ret_pages, page_list);
  697. if (pagevec_count(&freed_pvec))
  698. __pagevec_free(&freed_pvec);
  699. count_vm_events(PGACTIVATE, pgactivate);
  700. return nr_reclaimed;
  701. }
  702. /* LRU Isolation modes. */
  703. #define ISOLATE_INACTIVE 0 /* Isolate inactive pages. */
  704. #define ISOLATE_ACTIVE 1 /* Isolate active pages. */
  705. #define ISOLATE_BOTH 2 /* Isolate both active and inactive pages. */
  706. /*
  707. * Attempt to remove the specified page from its LRU. Only take this page
  708. * if it is of the appropriate PageActive status. Pages which are being
  709. * freed elsewhere are also ignored.
  710. *
  711. * page: page to consider
  712. * mode: one of the LRU isolation modes defined above
  713. *
  714. * returns 0 on success, -ve errno on failure.
  715. */
  716. int __isolate_lru_page(struct page *page, int mode, int file)
  717. {
  718. int ret = -EINVAL;
  719. /* Only take pages on the LRU. */
  720. if (!PageLRU(page))
  721. return ret;
  722. /*
  723. * When checking the active state, we need to be sure we are
  724. * dealing with comparible boolean values. Take the logical not
  725. * of each.
  726. */
  727. if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
  728. return ret;
  729. if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
  730. return ret;
  731. /*
  732. * When this function is being called for lumpy reclaim, we
  733. * initially look into all LRU pages, active, inactive and
  734. * unevictable; only give shrink_page_list evictable pages.
  735. */
  736. if (PageUnevictable(page))
  737. return ret;
  738. ret = -EBUSY;
  739. if (likely(get_page_unless_zero(page))) {
  740. /*
  741. * Be careful not to clear PageLRU until after we're
  742. * sure the page is not being freed elsewhere -- the
  743. * page release code relies on it.
  744. */
  745. ClearPageLRU(page);
  746. ret = 0;
  747. }
  748. return ret;
  749. }
  750. /*
  751. * zone->lru_lock is heavily contended. Some of the functions that
  752. * shrink the lists perform better by taking out a batch of pages
  753. * and working on them outside the LRU lock.
  754. *
  755. * For pagecache intensive workloads, this function is the hottest
  756. * spot in the kernel (apart from copy_*_user functions).
  757. *
  758. * Appropriate locks must be held before calling this function.
  759. *
  760. * @nr_to_scan: The number of pages to look through on the list.
  761. * @src: The LRU list to pull pages off.
  762. * @dst: The temp list to put pages on to.
  763. * @scanned: The number of pages that were scanned.
  764. * @order: The caller's attempted allocation order
  765. * @mode: One of the LRU isolation modes
  766. * @file: True [1] if isolating file [!anon] pages
  767. *
  768. * returns how many pages were moved onto *@dst.
  769. */
  770. static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
  771. struct list_head *src, struct list_head *dst,
  772. unsigned long *scanned, int order, int mode, int file)
  773. {
  774. unsigned long nr_taken = 0;
  775. unsigned long scan;
  776. for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
  777. struct page *page;
  778. unsigned long pfn;
  779. unsigned long end_pfn;
  780. unsigned long page_pfn;
  781. int zone_id;
  782. page = lru_to_page(src);
  783. prefetchw_prev_lru_page(page, src, flags);
  784. VM_BUG_ON(!PageLRU(page));
  785. switch (__isolate_lru_page(page, mode, file)) {
  786. case 0:
  787. list_move(&page->lru, dst);
  788. mem_cgroup_del_lru(page);
  789. nr_taken++;
  790. break;
  791. case -EBUSY:
  792. /* else it is being freed elsewhere */
  793. list_move(&page->lru, src);
  794. mem_cgroup_rotate_lru_list(page, page_lru(page));
  795. continue;
  796. default:
  797. BUG();
  798. }
  799. if (!order)
  800. continue;
  801. /*
  802. * Attempt to take all pages in the order aligned region
  803. * surrounding the tag page. Only take those pages of
  804. * the same active state as that tag page. We may safely
  805. * round the target page pfn down to the requested order
  806. * as the mem_map is guarenteed valid out to MAX_ORDER,
  807. * where that page is in a different zone we will detect
  808. * it from its zone id and abort this block scan.
  809. */
  810. zone_id = page_zone_id(page);
  811. page_pfn = page_to_pfn(page);
  812. pfn = page_pfn & ~((1 << order) - 1);
  813. end_pfn = pfn + (1 << order);
  814. for (; pfn < end_pfn; pfn++) {
  815. struct page *cursor_page;
  816. /* The target page is in the block, ignore it. */
  817. if (unlikely(pfn == page_pfn))
  818. continue;
  819. /* Avoid holes within the zone. */
  820. if (unlikely(!pfn_valid_within(pfn)))
  821. break;
  822. cursor_page = pfn_to_page(pfn);
  823. /* Check that we have not crossed a zone boundary. */
  824. if (unlikely(page_zone_id(cursor_page) != zone_id))
  825. continue;
  826. /*
  827. * If we don't have enough swap space, reclaiming of
  828. * anon page which don't already have a swap slot is
  829. * pointless.
  830. */
  831. if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
  832. !PageSwapCache(cursor_page))
  833. continue;
  834. if (__isolate_lru_page(cursor_page, mode, file) == 0) {
  835. list_move(&cursor_page->lru, dst);
  836. mem_cgroup_del_lru(cursor_page);
  837. nr_taken++;
  838. scan++;
  839. }
  840. }
  841. }
  842. *scanned = scan;
  843. return nr_taken;
  844. }
  845. static unsigned long isolate_pages_global(unsigned long nr,
  846. struct list_head *dst,
  847. unsigned long *scanned, int order,
  848. int mode, struct zone *z,
  849. struct mem_cgroup *mem_cont,
  850. int active, int file)
  851. {
  852. int lru = LRU_BASE;
  853. if (active)
  854. lru += LRU_ACTIVE;
  855. if (file)
  856. lru += LRU_FILE;
  857. return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
  858. mode, file);
  859. }
  860. /*
  861. * clear_active_flags() is a helper for shrink_active_list(), clearing
  862. * any active bits from the pages in the list.
  863. */
  864. static unsigned long clear_active_flags(struct list_head *page_list,
  865. unsigned int *count)
  866. {
  867. int nr_active = 0;
  868. int lru;
  869. struct page *page;
  870. list_for_each_entry(page, page_list, lru) {
  871. lru = page_lru_base_type(page);
  872. if (PageActive(page)) {
  873. lru += LRU_ACTIVE;
  874. ClearPageActive(page);
  875. nr_active++;
  876. }
  877. count[lru]++;
  878. }
  879. return nr_active;
  880. }
  881. /**
  882. * isolate_lru_page - tries to isolate a page from its LRU list
  883. * @page: page to isolate from its LRU list
  884. *
  885. * Isolates a @page from an LRU list, clears PageLRU and adjusts the
  886. * vmstat statistic corresponding to whatever LRU list the page was on.
  887. *
  888. * Returns 0 if the page was removed from an LRU list.
  889. * Returns -EBUSY if the page was not on an LRU list.
  890. *
  891. * The returned page will have PageLRU() cleared. If it was found on
  892. * the active list, it will have PageActive set. If it was found on
  893. * the unevictable list, it will have the PageUnevictable bit set. That flag
  894. * may need to be cleared by the caller before letting the page go.
  895. *
  896. * The vmstat statistic corresponding to the list on which the page was
  897. * found will be decremented.
  898. *
  899. * Restrictions:
  900. * (1) Must be called with an elevated refcount on the page. This is a
  901. * fundamentnal difference from isolate_lru_pages (which is called
  902. * without a stable reference).
  903. * (2) the lru_lock must not be held.
  904. * (3) interrupts must be enabled.
  905. */
  906. int isolate_lru_page(struct page *page)
  907. {
  908. int ret = -EBUSY;
  909. if (PageLRU(page)) {
  910. struct zone *zone = page_zone(page);
  911. spin_lock_irq(&zone->lru_lock);
  912. if (PageLRU(page) && get_page_unless_zero(page)) {
  913. int lru = page_lru(page);
  914. ret = 0;
  915. ClearPageLRU(page);
  916. del_page_from_lru_list(zone, page, lru);
  917. }
  918. spin_unlock_irq(&zone->lru_lock);
  919. }
  920. return ret;
  921. }
  922. /*
  923. * Are there way too many processes in the direct reclaim path already?
  924. */
  925. static int too_many_isolated(struct zone *zone, int file,
  926. struct scan_control *sc)
  927. {
  928. unsigned long inactive, isolated;
  929. if (current_is_kswapd())
  930. return 0;
  931. if (!scanning_global_lru(sc))
  932. return 0;
  933. if (file) {
  934. inactive = zone_page_state(zone, NR_INACTIVE_FILE);
  935. isolated = zone_page_state(zone, NR_ISOLATED_FILE);
  936. } else {
  937. inactive = zone_page_state(zone, NR_INACTIVE_ANON);
  938. isolated = zone_page_state(zone, NR_ISOLATED_ANON);
  939. }
  940. return isolated > inactive;
  941. }
  942. /*
  943. * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
  944. * of reclaimed pages
  945. */
  946. static unsigned long shrink_inactive_list(unsigned long max_scan,
  947. struct zone *zone, struct scan_control *sc,
  948. int priority, int file)
  949. {
  950. LIST_HEAD(page_list);
  951. struct pagevec pvec;
  952. unsigned long nr_scanned = 0;
  953. unsigned long nr_reclaimed = 0;
  954. struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
  955. int lumpy_reclaim = 0;
  956. while (unlikely(too_many_isolated(zone, file, sc))) {
  957. congestion_wait(WRITE, HZ/10);
  958. /* We are about to die and free our memory. Return now. */
  959. if (fatal_signal_pending(current))
  960. return SWAP_CLUSTER_MAX;
  961. }
  962. /*
  963. * If we need a large contiguous chunk of memory, or have
  964. * trouble getting a small set of contiguous pages, we
  965. * will reclaim both active and inactive pages.
  966. *
  967. * We use the same threshold as pageout congestion_wait below.
  968. */
  969. if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
  970. lumpy_reclaim = 1;
  971. else if (sc->order && priority < DEF_PRIORITY - 2)
  972. lumpy_reclaim = 1;
  973. pagevec_init(&pvec, 1);
  974. lru_add_drain();
  975. spin_lock_irq(&zone->lru_lock);
  976. do {
  977. struct page *page;
  978. unsigned long nr_taken;
  979. unsigned long nr_scan;
  980. unsigned long nr_freed;
  981. unsigned long nr_active;
  982. unsigned int count[NR_LRU_LISTS] = { 0, };
  983. int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
  984. unsigned long nr_anon;
  985. unsigned long nr_file;
  986. nr_taken = sc->isolate_pages(sc->swap_cluster_max,
  987. &page_list, &nr_scan, sc->order, mode,
  988. zone, sc->mem_cgroup, 0, file);
  989. if (scanning_global_lru(sc)) {
  990. zone->pages_scanned += nr_scan;
  991. if (current_is_kswapd())
  992. __count_zone_vm_events(PGSCAN_KSWAPD, zone,
  993. nr_scan);
  994. else
  995. __count_zone_vm_events(PGSCAN_DIRECT, zone,
  996. nr_scan);
  997. }
  998. if (nr_taken == 0)
  999. goto done;
  1000. nr_active = clear_active_flags(&page_list, count);
  1001. __count_vm_events(PGDEACTIVATE, nr_active);
  1002. __mod_zone_page_state(zone, NR_ACTIVE_FILE,
  1003. -count[LRU_ACTIVE_FILE]);
  1004. __mod_zone_page_state(zone, NR_INACTIVE_FILE,
  1005. -count[LRU_INACTIVE_FILE]);
  1006. __mod_zone_page_state(zone, NR_ACTIVE_ANON,
  1007. -count[LRU_ACTIVE_ANON]);
  1008. __mod_zone_page_state(zone, NR_INACTIVE_ANON,
  1009. -count[LRU_INACTIVE_ANON]);
  1010. nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
  1011. nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
  1012. __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
  1013. __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
  1014. reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
  1015. reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
  1016. reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
  1017. reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
  1018. spin_unlock_irq(&zone->lru_lock);
  1019. nr_scanned += nr_scan;
  1020. nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
  1021. /*
  1022. * If we are direct reclaiming for contiguous pages and we do
  1023. * not reclaim everything in the list, try again and wait
  1024. * for IO to complete. This will stall high-order allocations
  1025. * but that should be acceptable to the caller
  1026. */
  1027. if (nr_freed < nr_taken && !current_is_kswapd() &&
  1028. lumpy_reclaim) {
  1029. congestion_wait(BLK_RW_ASYNC, HZ/10);
  1030. /*
  1031. * The attempt at page out may have made some
  1032. * of the pages active, mark them inactive again.
  1033. */
  1034. nr_active = clear_active_flags(&page_list, count);
  1035. count_vm_events(PGDEACTIVATE, nr_active);
  1036. nr_freed += shrink_page_list(&page_list, sc,
  1037. PAGEOUT_IO_SYNC);
  1038. }
  1039. nr_reclaimed += nr_freed;
  1040. local_irq_disable();
  1041. if (current_is_kswapd())
  1042. __count_vm_events(KSWAPD_STEAL, nr_freed);
  1043. __count_zone_vm_events(PGSTEAL, zone, nr_freed);
  1044. spin_lock(&zone->lru_lock);
  1045. /*
  1046. * Put back any unfreeable pages.
  1047. */
  1048. while (!list_empty(&page_list)) {
  1049. int lru;
  1050. page = lru_to_page(&page_list);
  1051. VM_BUG_ON(PageLRU(page));
  1052. list_del(&page->lru);
  1053. if (unlikely(!page_evictable(page, NULL))) {
  1054. spin_unlock_irq(&zone->lru_lock);
  1055. putback_lru_page(page);
  1056. spin_lock_irq(&zone->lru_lock);
  1057. continue;
  1058. }
  1059. SetPageLRU(page);
  1060. lru = page_lru(page);
  1061. add_page_to_lru_list(zone, page, lru);
  1062. if (is_active_lru(lru)) {
  1063. int file = is_file_lru(lru);
  1064. reclaim_stat->recent_rotated[file]++;
  1065. }
  1066. if (!pagevec_add(&pvec, page)) {
  1067. spin_unlock_irq(&zone->lru_lock);
  1068. __pagevec_release(&pvec);
  1069. spin_lock_irq(&zone->lru_lock);
  1070. }
  1071. }
  1072. __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
  1073. __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
  1074. } while (nr_scanned < max_scan);
  1075. done:
  1076. spin_unlock_irq(&zone->lru_lock);
  1077. pagevec_release(&pvec);
  1078. return nr_reclaimed;
  1079. }
  1080. /*
  1081. * We are about to scan this zone at a certain priority level. If that priority
  1082. * level is smaller (ie: more urgent) than the previous priority, then note
  1083. * that priority level within the zone. This is done so that when the next
  1084. * process comes in to scan this zone, it will immediately start out at this
  1085. * priority level rather than having to build up its own scanning priority.
  1086. * Here, this priority affects only the reclaim-mapped threshold.
  1087. */
  1088. static inline void note_zone_scanning_priority(struct zone *zone, int priority)
  1089. {
  1090. if (priority < zone->prev_priority)
  1091. zone->prev_priority = priority;
  1092. }
  1093. /*
  1094. * This moves pages from the active list to the inactive list.
  1095. *
  1096. * We move them the other way if the page is referenced by one or more
  1097. * processes, from rmap.
  1098. *
  1099. * If the pages are mostly unmapped, the processing is fast and it is
  1100. * appropriate to hold zone->lru_lock across the whole operation. But if
  1101. * the pages are mapped, the processing is slow (page_referenced()) so we
  1102. * should drop zone->lru_lock around each page. It's impossible to balance
  1103. * this, so instead we remove the pages from the LRU while processing them.
  1104. * It is safe to rely on PG_active against the non-LRU pages in here because
  1105. * nobody will play with that bit on a non-LRU page.
  1106. *
  1107. * The downside is that we have to touch page->_count against each page.
  1108. * But we had to alter page->flags anyway.
  1109. */
  1110. static void move_active_pages_to_lru(struct zone *zone,
  1111. struct list_head *list,
  1112. enum lru_list lru)
  1113. {
  1114. unsigned long pgmoved = 0;
  1115. struct pagevec pvec;
  1116. struct page *page;
  1117. pagevec_init(&pvec, 1);
  1118. while (!list_empty(list)) {
  1119. page = lru_to_page(list);
  1120. VM_BUG_ON(PageLRU(page));
  1121. SetPageLRU(page);
  1122. list_move(&page->lru, &zone->lru[lru].list);
  1123. mem_cgroup_add_lru_list(page, lru);
  1124. pgmoved++;
  1125. if (!pagevec_add(&pvec, page) || list_empty(list)) {
  1126. spin_unlock_irq(&zone->lru_lock);
  1127. if (buffer_heads_over_limit)
  1128. pagevec_strip(&pvec);
  1129. __pagevec_release(&pvec);
  1130. spin_lock_irq(&zone->lru_lock);
  1131. }
  1132. }
  1133. __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
  1134. if (!is_active_lru(lru))
  1135. __count_vm_events(PGDEACTIVATE, pgmoved);
  1136. }
  1137. static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
  1138. struct scan_control *sc, int priority, int file)
  1139. {
  1140. unsigned long nr_taken;
  1141. unsigned long pgscanned;
  1142. unsigned long vm_flags;
  1143. LIST_HEAD(l_hold); /* The pages which were snipped off */
  1144. LIST_HEAD(l_active);
  1145. LIST_HEAD(l_inactive);
  1146. struct page *page;
  1147. struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
  1148. unsigned long nr_rotated = 0;
  1149. lru_add_drain();
  1150. spin_lock_irq(&zone->lru_lock);
  1151. nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
  1152. ISOLATE_ACTIVE, zone,
  1153. sc->mem_cgroup, 1, file);
  1154. /*
  1155. * zone->pages_scanned is used for detect zone's oom
  1156. * mem_cgroup remembers nr_scan by itself.
  1157. */
  1158. if (scanning_global_lru(sc)) {
  1159. zone->pages_scanned += pgscanned;
  1160. }
  1161. reclaim_stat->recent_scanned[file] += nr_taken;
  1162. __count_zone_vm_events(PGREFILL, zone, pgscanned);
  1163. if (file)
  1164. __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
  1165. else
  1166. __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
  1167. __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
  1168. spin_unlock_irq(&zone->lru_lock);
  1169. while (!list_empty(&l_hold)) {
  1170. cond_resched();
  1171. page = lru_to_page(&l_hold);
  1172. list_del(&page->lru);
  1173. if (unlikely(!page_evictable(page, NULL))) {
  1174. putback_lru_page(page);
  1175. continue;
  1176. }
  1177. /* page_referenced clears PageReferenced */
  1178. if (page_mapping_inuse(page) &&
  1179. page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
  1180. nr_rotated++;
  1181. /*
  1182. * Identify referenced, file-backed active pages and
  1183. * give them one more trip around the active list. So
  1184. * that executable code get better chances to stay in
  1185. * memory under moderate memory pressure. Anon pages
  1186. * are not likely to be evicted by use-once streaming
  1187. * IO, plus JVM can create lots of anon VM_EXEC pages,
  1188. * so we ignore them here.
  1189. */
  1190. if ((vm_flags & VM_EXEC) && !PageAnon(page)) {
  1191. list_add(&page->lru, &l_active);
  1192. continue;
  1193. }
  1194. }
  1195. ClearPageActive(page); /* we are de-activating */
  1196. list_add(&page->lru, &l_inactive);
  1197. }
  1198. /*
  1199. * Move pages back to the lru list.
  1200. */
  1201. spin_lock_irq(&zone->lru_lock);
  1202. /*
  1203. * Count referenced pages from currently used mappings as rotated,
  1204. * even though only some of them are actually re-activated. This
  1205. * helps balance scan pressure between file and anonymous pages in
  1206. * get_scan_ratio.
  1207. */
  1208. reclaim_stat->recent_rotated[file] += nr_rotated;
  1209. move_active_pages_to_lru(zone, &l_active,
  1210. LRU_ACTIVE + file * LRU_FILE);
  1211. move_active_pages_to_lru(zone, &l_inactive,
  1212. LRU_BASE + file * LRU_FILE);
  1213. __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
  1214. spin_unlock_irq(&zone->lru_lock);
  1215. }
  1216. static int inactive_anon_is_low_global(struct zone *zone)
  1217. {
  1218. unsigned long active, inactive;
  1219. active = zone_page_state(zone, NR_ACTIVE_ANON);
  1220. inactive = zone_page_state(zone, NR_INACTIVE_ANON);
  1221. if (inactive * zone->inactive_ratio < active)
  1222. return 1;
  1223. return 0;
  1224. }
/**
 * inactive_anon_is_low - check if anonymous pages need to be deactivated
 * @zone: zone to check
 * @sc:   scan control of this context
 *
 * Returns true if the zone does not have enough inactive anon pages,
 * meaning some active anon pages need to be deactivated.
 */
static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
{
	int low;

	if (scanning_global_lru(sc))
		low = inactive_anon_is_low_global(zone);
	else
		low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
	return low;
}

static int inactive_file_is_low_global(struct zone *zone)
{
	unsigned long active, inactive;

	active = zone_page_state(zone, NR_ACTIVE_FILE);
	inactive = zone_page_state(zone, NR_INACTIVE_FILE);

	return (active > inactive);
}

/**
 * inactive_file_is_low - check if file pages need to be deactivated
 * @zone: zone to check
 * @sc:   scan control of this context
 *
 * When the system is doing streaming IO, memory pressure here
 * ensures that active file pages get deactivated, until more
 * than half of the file pages are on the inactive list.
 *
 * Once we get to that situation, protect the system's working
 * set from being evicted by disabling active file page aging.
 *
 * This uses a different ratio than the anonymous pages, because
 * the page cache uses a use-once replacement algorithm.
 */
static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
{
	int low;

	if (scanning_global_lru(sc))
		low = inactive_file_is_low_global(zone);
	else
		low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
	return low;
}

static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
	struct zone *zone, struct scan_control *sc, int priority)
{
	int file = is_file_lru(lru);

	if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
		shrink_active_list(nr_to_scan, zone, sc, priority, file);
		return 0;
	}

	if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
		shrink_active_list(nr_to_scan, zone, sc, priority, file);
		return 0;
	}
	return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
}

/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative value of each set of LRU lists is determined
 * by looking at the fraction of the pages scanned we did rotate back
 * onto the active list instead of evict.
 *
 * percent[0] specifies how much pressure to put on ram/swap backed
 * memory, while percent[1] determines pressure on the file LRUs.
 */
static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
					unsigned long *percent)
{
	unsigned long anon, file, free;
	unsigned long anon_prio, file_prio;
	unsigned long ap, fp;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);

	anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
	file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);

	if (scanning_global_lru(sc)) {
		free = zone_page_state(zone, NR_FREE_PAGES);
		/* If we have very few page cache pages,
		   force-scan anon pages. */
		if (unlikely(file + free <= high_wmark_pages(zone))) {
			percent[0] = 100;
			percent[1] = 0;
			return;
		}
	}

	/*
	 * OK, so we have swap space and a fair amount of page cache
	 * pages.  We use the recently rotated / recently scanned
	 * ratios to determine how valuable each cache is.
	 *
	 * Because workloads change over time (and to avoid overflow)
	 * we keep these statistics as a floating average, which ends
	 * up weighing recent references more than old ones.
	 *
	 * anon in [0], file in [1]
	 */
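	/*
	 * Illustrative example (numbers are made up): once recent_scanned[0]
	 * grows past a quarter of the anon pages, both counters are halved,
	 * e.g. scanned 1000 / rotated 600 decays to 500 / 300.  The
	 * rotated/scanned ratio is preserved while old history fades out
	 * exponentially.
	 */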
	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
		spin_lock_irq(&zone->lru_lock);
		reclaim_stat->recent_scanned[0] /= 2;
		reclaim_stat->recent_rotated[0] /= 2;
		spin_unlock_irq(&zone->lru_lock);
	}

	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
		spin_lock_irq(&zone->lru_lock);
		reclaim_stat->recent_scanned[1] /= 2;
		reclaim_stat->recent_rotated[1] /= 2;
		spin_unlock_irq(&zone->lru_lock);
	}

	/*
	 * With swappiness at 100, anonymous and file have the same priority.
	 * This scanning priority is essentially the inverse of IO cost.
	 */
	anon_prio = sc->swappiness;
	file_prio = 200 - sc->swappiness;

	/*
	 * The amount of pressure on anon vs file pages is inversely
	 * proportional to the fraction of recently scanned pages on
	 * each list that were recently referenced and in active use.
	 */
	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
	ap /= reclaim_stat->recent_rotated[0] + 1;

	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
	fp /= reclaim_stat->recent_rotated[1] + 1;

	/* Normalize to percentages */
	percent[0] = 100 * ap / (ap + fp + 1);
	percent[1] = 100 - percent[0];
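	/*
	 * Worked example with illustrative numbers: swappiness 60 gives
	 * anon_prio 60 and file_prio 140.  With scanned/rotated of 500/400
	 * for anon and 500/100 for file:
	 *   ap = 61 * 501 / 401 = 76
	 *   fp = 141 * 501 / 101 = 699
	 * so percent[0] = 100*76/776 = 9 and percent[1] = 91, i.e. the
	 * heavily re-referenced anon list gets far less scan pressure than
	 * the streaming file list.
	 */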
}

/*
 * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
 * until we have collected @swap_cluster_max pages to scan.
 */
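/*
 * Example with illustrative numbers: with swap_cluster_max == 32, three
 * calls asking for 10 pages each only accumulate 10, 20, 30 in
 * @nr_saved_scan and return 0; the fourth call reaches 40 >= 32, resets
 * the saved count and returns the whole batch of 40.
 */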
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan,
				       unsigned long swap_cluster_max)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	if (nr >= swap_cluster_max)
		*nr_saved_scan = 0;
	else
		nr = 0;

	return nr;
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void shrink_zone(int priority, struct zone *zone,
				struct scan_control *sc)
{
	unsigned long nr[NR_LRU_LISTS];
	unsigned long nr_to_scan;
	unsigned long percent[2];	/* anon @ 0; file @ 1 */
	enum lru_list l;
	unsigned long nr_reclaimed = sc->nr_reclaimed;
	unsigned long swap_cluster_max = sc->swap_cluster_max;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
	int noswap = 0;

	/* If we have no swap space, do not bother scanning anon pages. */
	if (!sc->may_swap || (nr_swap_pages <= 0)) {
		noswap = 1;
		percent[0] = 0;
		percent[1] = 100;
	} else
		get_scan_ratio(zone, sc, percent);

	for_each_evictable_lru(l) {
		int file = is_file_lru(l);
		unsigned long scan;

		scan = zone_nr_lru_pages(zone, sc, l);
		if (priority || noswap) {
			scan >>= priority;
			scan = (scan * percent[file]) / 100;
		}
		nr[l] = nr_scan_try_batch(scan,
					  &reclaim_stat->nr_saved_scan[l],
					  swap_cluster_max);
	}
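	/*
	 * Illustrative numbers: an inactive file list of 1,000,000 pages at
	 * priority 12 yields scan = 1000000 >> 12 = 244; with percent[1] of,
	 * say, 91 that becomes ~222 pages, which nr_scan_try_batch() then
	 * hands out in swap_cluster_max sized chunks.
	 */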
	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
					nr[LRU_INACTIVE_FILE]) {
		for_each_evictable_lru(l) {
			if (nr[l]) {
				nr_to_scan = min(nr[l], swap_cluster_max);
				nr[l] -= nr_to_scan;

				nr_reclaimed += shrink_list(l, nr_to_scan,
							    zone, sc, priority);
			}
		}
		/*
		 * On large memory systems, scan >> priority can become
		 * really large.  This is fine for the starting priority;
		 * we want to put equal scanning pressure on each zone.
		 * However, if the VM has a harder time freeing pages,
		 * with multiple processes reclaiming pages, the total
		 * freeing target can get unreasonably large.
		 */
		if (nr_reclaimed > swap_cluster_max &&
			priority < DEF_PRIORITY && !current_is_kswapd())
			break;
	}

	sc->nr_reclaimed = nr_reclaimed;

	/*
	 * Even if we did not try to evict anon pages at all, we want to
	 * rebalance the anon lru active/inactive ratio.
	 */
	if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);

	throttle_vm_writeout(sc->gfp_mask);
}
/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
 * Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
 *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
 *    zone defense algorithm.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static void shrink_zones(int priority, struct zonelist *zonelist,
					struct scan_control *sc)
{
	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
	struct zoneref *z;
	struct zone *zone;

	sc->all_unreclaimable = 1;
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
					sc->nodemask) {
		if (!populated_zone(zone))
			continue;
		/*
		 * Take care that memory controller reclaiming has only a
		 * small influence on the global LRU.
		 */
		if (scanning_global_lru(sc)) {
			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
				continue;
			note_zone_scanning_priority(zone, priority);

			if (zone_is_all_unreclaimable(zone) &&
						priority != DEF_PRIORITY)
				continue;	/* Let kswapd poll it */
			sc->all_unreclaimable = 0;
		} else {
			/*
			 * Ignore cpuset limitation here.  We just want to
			 * reduce the number of pages used by us, regardless
			 * of memory shortage.
			 */
			sc->all_unreclaimable = 0;
			mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
							priority);
		}

		shrink_zone(priority, zone, sc);
	}
}
/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 *
 * returns:	0, if no pages reclaimed
 *		else, the number of pages reclaimed
 */
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
					struct scan_control *sc)
{
	int priority;
	unsigned long ret = 0;
	unsigned long total_scanned = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	unsigned long lru_pages = 0;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);

	delayacct_freepages_start();

	if (scanning_global_lru(sc))
		count_vm_event(ALLOCSTALL);
	/*
	 * mem_cgroup will not do shrink_slab.
	 */
	if (scanning_global_lru(sc)) {
		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
				continue;

			lru_pages += zone_reclaimable_pages(zone);
		}
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc->nr_scanned = 0;
		if (!priority)
			disable_swap_token();
		shrink_zones(priority, zonelist, sc);
		/*
		 * Don't shrink slabs when reclaiming memory from
		 * over limit cgroups
		 */
		if (scanning_global_lru(sc)) {
			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
			if (reclaim_state) {
				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
				reclaim_state->reclaimed_slab = 0;
			}
		}
		total_scanned += sc->nr_scanned;
		if (sc->nr_reclaimed >= sc->swap_cluster_max) {
			ret = sc->nr_reclaimed;
			goto out;
		}

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.  But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
		if (total_scanned > sc->swap_cluster_max +
					sc->swap_cluster_max / 2) {
			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
			sc->may_writepage = 1;
		}
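		/*
		 * Example: with the default SWAP_CLUSTER_MAX of 32 the
		 * threshold above is 48 scanned pages; past that point the
		 * flusher threads are woken and this reclaim run may start
		 * writing pages itself.
		 */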
		/* Take a nap, wait for some writeback to complete */
		if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
			congestion_wait(BLK_RW_ASYNC, HZ/10);
	}
	/* top priority shrink_zones still had more to do? don't OOM, then */
	if (!sc->all_unreclaimable && scanning_global_lru(sc))
		ret = sc->nr_reclaimed;
out:
	/*
	 * Now that we've scanned all the zones at this priority level, note
	 * that level within the zone so that the next thread which performs
	 * scanning of this zone will immediately start out at this priority
	 * level.  This affects only the decision whether or not to bring
	 * mapped pages onto the inactive list.
	 */
	if (priority < 0)
		priority = 0;

	if (scanning_global_lru(sc)) {
		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
				continue;

			zone->prev_priority = priority;
		}
	} else
		mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);

	delayacct_freepages_end();

	return ret;
}

unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	struct scan_control sc = {
		.gfp_mask = gfp_mask,
		.may_writepage = !laptop_mode,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.may_unmap = 1,
		.may_swap = 1,
		.swappiness = vm_swappiness,
		.order = order,
		.mem_cgroup = NULL,
		.isolate_pages = isolate_pages_global,
		.nodemask = nodemask,
	};

	return do_try_to_free_pages(zonelist, &sc);
}
#ifdef CONFIG_CGROUP_MEM_RES_CTLR

unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
					   gfp_t gfp_mask,
					   bool noswap,
					   unsigned int swappiness)
{
	struct scan_control sc = {
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = !noswap,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.swappiness = swappiness,
		.order = 0,
		.mem_cgroup = mem_cont,
		.isolate_pages = mem_cgroup_isolate_pages,
		.nodemask = NULL, /* we don't care about placement */
	};
	struct zonelist *zonelist;

	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
	return do_try_to_free_pages(zonelist, &sc);
}
#endif
/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at high_wmark_pages(zone).
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
 * lower zones regardless of the number of free pages in the lower zones.  This
 * interoperates with the page allocator fallback scheme to ensure that aging
 * of pages is balanced across the zones.
 */
static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
{
	int all_zones_ok;
	int priority;
	int i;
	unsigned long total_scanned;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_unmap = 1,
		.may_swap = 1,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.swappiness = vm_swappiness,
		.order = order,
		.mem_cgroup = NULL,
		.isolate_pages = isolate_pages_global,
	};
	/*
	 * temp_priority is used to remember the scanning priority at which
	 * this zone was successfully refilled to
	 * free_pages == high_wmark_pages(zone).
	 */
	int temp_priority[MAX_NR_ZONES];

loop_again:
	total_scanned = 0;
	sc.nr_reclaimed = 0;
	sc.may_writepage = !laptop_mode;
	count_vm_event(PAGEOUTRUN);

	for (i = 0; i < pgdat->nr_zones; i++)
		temp_priority[i] = DEF_PRIORITY;

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
		unsigned long lru_pages = 0;

		/* The swap token gets in the way of swapout... */
		if (!priority)
			disable_swap_token();

		all_zones_ok = 1;

		/*
		 * Scan in the highmem->dma direction for the highest
		 * zone which needs scanning
		 */
		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			if (zone_is_all_unreclaimable(zone) &&
			    priority != DEF_PRIORITY)
				continue;

			/*
			 * Do some background aging of the anon list, to give
			 * pages a chance to be referenced before reclaiming.
			 */
			if (inactive_anon_is_low(zone, &sc))
				shrink_active_list(SWAP_CLUSTER_MAX, zone,
							&sc, priority, 0);

			if (!zone_watermark_ok(zone, order,
					high_wmark_pages(zone), 0, 0)) {
				end_zone = i;
				break;
			}
		}
		if (i < 0)
			goto out;

		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			lru_pages += zone_reclaimable_pages(zone);
		}

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_slab;

			if (!populated_zone(zone))
				continue;

			if (zone_is_all_unreclaimable(zone) &&
					priority != DEF_PRIORITY)
				continue;

			if (!zone_watermark_ok(zone, order,
					high_wmark_pages(zone), end_zone, 0))
				all_zones_ok = 0;
			temp_priority[i] = priority;
			sc.nr_scanned = 0;
			note_zone_scanning_priority(zone, priority);
			/*
			 * We put equal pressure on every zone, unless one
			 * zone has way too many pages free already.
			 */
			if (!zone_watermark_ok(zone, order,
					8*high_wmark_pages(zone), end_zone, 0))
				shrink_zone(priority, zone, &sc);
			reclaim_state->reclaimed_slab = 0;
			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
						lru_pages);
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			total_scanned += sc.nr_scanned;
			if (zone_is_all_unreclaimable(zone))
				continue;
			if (nr_slab == 0 && zone->pages_scanned >=
					(zone_reclaimable_pages(zone) * 6))
				zone_set_flag(zone,
					      ZONE_ALL_UNRECLAIMABLE);
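			/*
			 * In other words: mark the zone dead once roughly six
			 * times its reclaimable pages have been scanned while
			 * the latest slab shrink freed nothing at all.
			 */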
			/*
			 * If we've done a decent amount of scanning and
			 * the reclaim ratio is low, start doing writepage
			 * even in laptop mode
			 */
			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
				sc.may_writepage = 1;
		}
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators.  It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
			break;
	}
out:
	/*
	 * Note within each zone the priority level at which this zone was
	 * brought into a happy state, so that the next thread which scans
	 * this zone will start out at that priority level.
	 */
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = temp_priority[i];
	}
	if (!all_zones_ok) {
		cond_resched();

		try_to_freeze();

		/*
		 * Fragmentation may mean that the system cannot be
		 * rebalanced for high-order allocations in all zones.
		 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
		 * it means the zones have been fully scanned and are still
		 * not balanced.  For high-order allocations, there is
		 * little point trying all over again as kswapd may
		 * loop forever.
		 *
		 * Instead, recheck all watermarks at order-0 as they
		 * are the most important.  If watermarks are ok, kswapd will go
		 * back to sleep.  High-order users can still perform direct
		 * reclaim if they wish.
		 */
		if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
			order = sc.order = 0;

		goto loop_again;
	}

	return sc.nr_reclaimed;
}
/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up.  This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned long order;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	lockdep_set_current_reclaim_state(GFP_KERNEL);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	order = 0;
	for ( ; ; ) {
		unsigned long new_order;

		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		new_order = pgdat->kswapd_max_order;
		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation
			 */
			order = new_order;
		} else {
			if (!freezing(current))
				schedule();

			order = pgdat->kswapd_max_order;
		}
		finish_wait(&pgdat->kswapd_wait, &wait);

		if (!try_to_freeze()) {
			/* We can speed up thawing tasks if we don't call
			 * balance_pgdat after returning from the refrigerator
			 */
			balance_pgdat(pgdat, order);
		}
	}
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	pgdat = zone->zone_pgdat;
	if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
		return;
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
		return;
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&pgdat->kswapd_wait);
}
/*
 * The reclaimable count should be mostly accurate.
 * The less reclaimable pages may be:
 * - mlocked pages, which will be moved to the unevictable list when
 *   encountered
 * - mapped pages, which may require several passes to be reclaimed
 * - dirty pages, which are not "instantly" reclaimable
 */
unsigned long global_reclaimable_pages(void)
{
	int nr;

	nr = global_page_state(NR_ACTIVE_FILE) +
	     global_page_state(NR_INACTIVE_FILE);

	if (nr_swap_pages > 0)
		nr += global_page_state(NR_ACTIVE_ANON) +
		      global_page_state(NR_INACTIVE_ANON);

	return nr;
}
unsigned long zone_reclaimable_pages(struct zone *zone)
{
	int nr;

	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
	     zone_page_state(zone, NR_INACTIVE_FILE);

	if (nr_swap_pages > 0)
		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
		      zone_page_state(zone, NR_INACTIVE_ANON);

	return nr;
}

#ifdef CONFIG_HIBERNATION
/*
 * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
 * from LRU lists system-wide, for given pass and priority.
 *
 * For pass > 3 we also try to shrink the LRU lists that contain a few pages
 */
static void shrink_all_zones(unsigned long nr_pages, int prio,
				      int pass, struct scan_control *sc)
{
	struct zone *zone;
	unsigned long nr_reclaimed = 0;
	struct zone_reclaim_stat *reclaim_stat;

	for_each_populated_zone(zone) {
		enum lru_list l;

		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
			continue;

		for_each_evictable_lru(l) {
			enum zone_stat_item ls = NR_LRU_BASE + l;
			unsigned long lru_pages = zone_page_state(zone, ls);

			/* For pass = 0, we don't shrink the active list */
			if (pass == 0 && (l == LRU_ACTIVE_ANON ||
						l == LRU_ACTIVE_FILE))
				continue;

			reclaim_stat = get_reclaim_stat(zone, sc);
			reclaim_stat->nr_saved_scan[l] +=
						(lru_pages >> prio) + 1;
			if (reclaim_stat->nr_saved_scan[l]
						>= nr_pages || pass > 3) {
				unsigned long nr_to_scan;

				reclaim_stat->nr_saved_scan[l] = 0;
				nr_to_scan = min(nr_pages, lru_pages);
				nr_reclaimed += shrink_list(l, nr_to_scan, zone,
								sc, prio);
				if (nr_reclaimed >= nr_pages) {
					sc->nr_reclaimed += nr_reclaimed;
					return;
				}
			}
		}
	}
	sc->nr_reclaimed += nr_reclaimed;
}

/*
 * Try to free `nr_pages' of memory, system-wide, and return the number of
 * freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_pages)
{
	unsigned long lru_pages, nr_slab;
	int pass;
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_unmap = 0,
		.may_writepage = 1,
		.isolate_pages = isolate_pages_global,
		.nr_reclaimed = 0,
	};

	current->reclaim_state = &reclaim_state;

	lru_pages = global_reclaimable_pages();
	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
	/* If slab caches are huge, it's better to hit them first */
	while (nr_slab >= lru_pages) {
		reclaim_state.reclaimed_slab = 0;
		shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
		if (!reclaim_state.reclaimed_slab)
			break;

		sc.nr_reclaimed += reclaim_state.reclaimed_slab;
		if (sc.nr_reclaimed >= nr_pages)
			goto out;

		nr_slab -= reclaim_state.reclaimed_slab;
	}

	/*
	 * We try to shrink LRUs in 5 passes:
	 * 0 = Reclaim from inactive_list only
	 * 1 = Reclaim from active list but don't reclaim mapped
	 * 2 = 2nd pass of type 1
	 * 3 = Reclaim mapped (normal reclaim)
	 * 4 = 2nd pass of type 3
	 */
	for (pass = 0; pass < 5; pass++) {
		int prio;

		/* Force reclaiming mapped pages in the passes #3 and #4 */
		if (pass > 2)
			sc.may_unmap = 1;

		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
			unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;

			sc.nr_scanned = 0;
			sc.swap_cluster_max = nr_to_scan;
			shrink_all_zones(nr_to_scan, prio, pass, &sc);
			if (sc.nr_reclaimed >= nr_pages)
				goto out;

			reclaim_state.reclaimed_slab = 0;
			shrink_slab(sc.nr_scanned, sc.gfp_mask,
				    global_reclaimable_pages());
			sc.nr_reclaimed += reclaim_state.reclaimed_slab;
			if (sc.nr_reclaimed >= nr_pages)
				goto out;

			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
				congestion_wait(BLK_RW_ASYNC, HZ / 10);
		}
	}

	/*
	 * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
	 * something in slab caches
	 */
	if (!sc.nr_reclaimed) {
		do {
			reclaim_state.reclaimed_slab = 0;
			shrink_slab(nr_pages, sc.gfp_mask,
				    global_reclaimable_pages());
			sc.nr_reclaimed += reclaim_state.reclaimed_slab;
		} while (sc.nr_reclaimed < nr_pages &&
				reclaim_state.reclaimed_slab > 0);
	}

out:
	current->reclaim_state = NULL;

	return sc.nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */

/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_HIGH_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			const struct cpumask *mask;

			mask = cpumask_of_node(pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}
/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state == SYSTEM_BOOTING);
		printk("Failed to start kswapd on node %d\n", nid);
		ret = -1;
	}
	return ret;
}
static int __init kswapd_init(void)
{
	int nid;

	swap_setup();
	for_each_node_state(nid, N_HIGH_MEMORY)
		kswapd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */

/*
 * Priority for ZONE_RECLAIM.  This determines the fraction of pages
 * of a node considered for each zone_reclaim.  4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4
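/*
 * The fraction comes from the scan >> priority step in shrink_zone():
 * starting at priority 4, a pass looks at zone_pages >> 4, i.e. 1/16th
 * of the zone, and the fraction doubles on each retry (priority counts
 * down toward 0) until enough pages have been reclaimed.
 */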
/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
		zone_page_state(zone, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}
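/*
 * Example with illustrative numbers: 10000 pages on the file LRUs of
 * which 4000 are mapped leaves 6000 unmapped file pages; if the mapped
 * count exceeds the LRU count (tmpfs skew, see above), the result is
 * clamped to 0 rather than underflowing.
 */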
/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static long zone_pagecache_reclaimable(struct zone *zone)
{
	long nr_pagecache_reclaimable;
	long delta = 0;

	/*
	 * If RECLAIM_SWAP is set, then all file pages are considered
	 * potentially reclaimable.  Otherwise, we have to worry about
	 * pages like swapcache and zone_unmapped_file_pages() provides
	 * a better estimate
	 */
	if (zone_reclaim_mode & RECLAIM_SWAP)
		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(zone_reclaim_mode & RECLAIM_WRITE))
		delta += zone_page_state(zone, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}
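/*
 * Example with illustrative numbers: with RECLAIM_WRITE clear, a zone
 * holding 6000 unmapped file pages of which 1500 are dirty reports 4500
 * reclaimable page cache pages, since reclaim that cannot write can only
 * hope to free the clean ones.
 */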
/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	int priority;
	struct scan_control sc = {
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.may_swap = 1,
		.swap_cluster_max = max_t(unsigned long, nr_pages,
					SWAP_CLUSTER_MAX),
		.gfp_mask = gfp_mask,
		.swappiness = vm_swappiness,
		.order = order,
		.isolate_pages = isolate_pages_global,
	};
	unsigned long slab_reclaimable;

	disable_swap_token();
	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink zone with increasing
		 * priorities until we have enough memory freed.
		 */
		priority = ZONE_RECLAIM_PRIORITY;
		do {
			note_zone_scanning_priority(zone, priority);
			shrink_zone(priority, zone, &sc);
			priority--;
		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
	}

	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	if (slab_reclaimable > zone->min_slab_pages) {
		/*
		 * shrink_slab() does not currently allow us to determine how
		 * many pages were freed in this zone.  So we take the current
		 * number of slab pages and shake the slab until it is reduced
		 * by the same nr_pages that we used for reclaiming unmapped
		 * pages.
		 *
		 * Note that shrink_slab will free memory on all zones and may
		 * take a long time.
		 */
		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
				slab_reclaimable - nr_pages)
			;

		/*
		 * Update nr_reclaimed by the number of slab pages we
		 * reclaimed from this zone.
		 */
		sc.nr_reclaimed += slab_reclaimable -
			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
	return sc.nr_reclaimed >= nr_pages;
}

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int node_id;
	int ret;

	/*
	 * Zone reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the zone is overallocated.  So we do not reclaim
	 * if less than a specified percentage of the zone is used by
	 * unmapped file backed pages.
	 */
	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
		return ZONE_RECLAIM_FULL;

	if (zone_is_all_unreclaimable(zone))
		return ZONE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
		return ZONE_RECLAIM_NOSCAN;

	/*
	 * Only run zone reclaim on the local zone or on zones that do not
	 * have associated processors.  This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	node_id = zone_to_nid(zone);
	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
		return ZONE_RECLAIM_NOSCAN;

	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return ZONE_RECLAIM_NOSCAN;

	ret = __zone_reclaim(zone, gfp_mask, order);
	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
#endif
/*
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 * @vma: the VMA in which the page is or will be mapped, may be NULL
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.  The vma argument is !NULL when called from the
 * fault path to determine how to instantiate a new page.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
int page_evictable(struct page *page, struct vm_area_struct *vma)
{
	if (mapping_unevictable(page_mapping(page)))
		return 0;

	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
		return 0;

	return 1;
}
/**
 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
 * @page: page to check evictability and move to appropriate lru list
 * @zone: zone page is in
 *
 * Checks a page for evictability and moves the page to the appropriate
 * zone lru list.
 *
 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
 * have PageUnevictable set.
 */
static void check_move_unevictable_page(struct page *page, struct zone *zone)
{
	VM_BUG_ON(PageActive(page));

retry:
	ClearPageUnevictable(page);
	if (page_evictable(page, NULL)) {
		enum lru_list l = page_lru_base_type(page);

		__dec_zone_state(zone, NR_UNEVICTABLE);
		list_move(&page->lru, &zone->lru[l].list);
		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
		__count_vm_event(UNEVICTABLE_PGRESCUED);
	} else {
		/*
		 * rotate unevictable list
		 */
		SetPageUnevictable(page);
		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
		if (page_evictable(page, NULL))
			goto retry;
	}
}

/**
 * scan_mapping_unevictable_pages - scan an address space for evictable pages
 * @mapping: struct address_space to scan for evictable pages
 *
 * Scan all pages in mapping.  Check unevictable pages for
 * evictability and move them to the appropriate zone lru list.
 */
void scan_mapping_unevictable_pages(struct address_space *mapping)
{
	pgoff_t next = 0;
	pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
			PAGE_CACHE_SHIFT;
	struct zone *zone;
	struct pagevec pvec;

	if (mapping->nrpages == 0)
		return;

	pagevec_init(&pvec, 0);
	while (next < end &&
		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		int i;
		int pg_scanned = 0;

		zone = NULL;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;
			struct zone *pagezone = page_zone(page);

			pg_scanned++;
			if (page_index > next)
				next = page_index;
			next++;

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irq(&zone->lru_lock);
				zone = pagezone;
				spin_lock_irq(&zone->lru_lock);
			}

			if (PageLRU(page) && PageUnevictable(page))
				check_move_unevictable_page(page, zone);
		}
		if (zone)
			spin_unlock_irq(&zone->lru_lock);
		pagevec_release(&pvec);

		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
	}
}
/**
 * scan_zone_unevictable_pages - check unevictable list for evictable pages
 * @zone: zone whose unevictable list is to be scanned
 *
 * Scan @zone's unevictable LRU lists to check for pages that have become
 * evictable.  Move those that have to @zone's inactive list where they
 * become candidates for reclaim, unless shrink_inactive_zone() decides
 * to reactivate them.  Pages that are still unevictable are rotated
 * back onto @zone's unevictable list.
 */
#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
static void scan_zone_unevictable_pages(struct zone *zone)
{
	struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
	unsigned long scan;
	unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);

	while (nr_to_scan > 0) {
		unsigned long batch_size = min(nr_to_scan,
						SCAN_UNEVICTABLE_BATCH_SIZE);

		spin_lock_irq(&zone->lru_lock);
		for (scan = 0; scan < batch_size; scan++) {
			struct page *page = lru_to_page(l_unevictable);

			if (!trylock_page(page))
				continue;

			prefetchw_prev_lru_page(page, l_unevictable, flags);

			if (likely(PageLRU(page) && PageUnevictable(page)))
				check_move_unevictable_page(page, zone);

			unlock_page(page);
		}
		spin_unlock_irq(&zone->lru_lock);

		nr_to_scan -= batch_size;
	}
}
/**
 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
 *
 * A really big hammer:  scan all zones' unevictable LRU lists to check for
 * pages that have become evictable.  Move those back to the zones'
 * inactive list where they become candidates for reclaim.
 * This occurs when, e.g., we have unswappable pages on the unevictable lists,
 * and we add swap to the system.  As such, it runs in the context of a task
 * that has possibly/probably made some previously unevictable pages
 * evictable.
 */
static void scan_all_zones_unevictable_pages(void)
{
	struct zone *zone;

	for_each_zone(zone) {
		scan_zone_unevictable_pages(zone);
	}
}

/*
 * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
 * all nodes' unevictable lists for evictable pages
 */
unsigned long scan_unevictable_pages;

int scan_unevictable_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);

	if (write && *(unsigned long *)table->data)
		scan_all_zones_unevictable_pages();

	scan_unevictable_pages = 0;
	return 0;
}

/*
 * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
 * a specified node's per zone unevictable lists for evictable pages.
 */
static ssize_t read_scan_unevictable_node(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "0\n");	/* always zero; should fit... */
}

static ssize_t write_scan_unevictable_node(struct sys_device *dev,
					   struct sysdev_attribute *attr,
					   const char *buf, size_t count)
{
	struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
	struct zone *zone;
	unsigned long res;
	unsigned long req = strict_strtoul(buf, 10, &res);

	if (!req)
		return 1;	/* zero is no-op */

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;
		scan_zone_unevictable_pages(zone);
	}
	return 1;
}

static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
			read_scan_unevictable_node,
			write_scan_unevictable_node);

int scan_unevictable_register_node(struct node *node)
{
	return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
}

void scan_unevictable_unregister_node(struct node *node)
{
	sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
}