vmscan.c 77 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768
  1. /*
  2. * linux/mm/vmscan.c
  3. *
  4. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  5. *
  6. * Swap reorganised 29.12.95, Stephen Tweedie.
  7. * kswapd added: 7.1.96 sct
  8. * Removed kswapd_ctl limits, and swap out as many pages as needed
  9. * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
  10. * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
  11. * Multiqueue VM started 5.8.00, Rik van Riel.
  12. */
  13. #include <linux/mm.h>
  14. #include <linux/module.h>
  15. #include <linux/slab.h>
  16. #include <linux/kernel_stat.h>
  17. #include <linux/swap.h>
  18. #include <linux/pagemap.h>
  19. #include <linux/init.h>
  20. #include <linux/highmem.h>
  21. #include <linux/vmstat.h>
  22. #include <linux/file.h>
  23. #include <linux/writeback.h>
  24. #include <linux/blkdev.h>
  25. #include <linux/buffer_head.h> /* for try_to_release_page(),
  26. buffer_heads_over_limit */
  27. #include <linux/mm_inline.h>
  28. #include <linux/pagevec.h>
  29. #include <linux/backing-dev.h>
  30. #include <linux/rmap.h>
  31. #include <linux/topology.h>
  32. #include <linux/cpu.h>
  33. #include <linux/cpuset.h>
  34. #include <linux/notifier.h>
  35. #include <linux/rwsem.h>
  36. #include <linux/delay.h>
  37. #include <linux/kthread.h>
  38. #include <linux/freezer.h>
  39. #include <linux/memcontrol.h>
  40. #include <linux/delayacct.h>
  41. #include <linux/sysctl.h>
  42. #include <asm/tlbflush.h>
  43. #include <asm/div64.h>
  44. #include <linux/swapops.h>
  45. #include "internal.h"
  46. struct scan_control {
  47. /* Incremented by the number of inactive pages that were scanned */
  48. unsigned long nr_scanned;
  49. /* Number of pages freed so far during a call to shrink_zones() */
  50. unsigned long nr_reclaimed;
  51. /* This context's GFP mask */
  52. gfp_t gfp_mask;
  53. int may_writepage;
  54. /* Can mapped pages be reclaimed? */
  55. int may_unmap;
  56. /* Can pages be swapped as part of reclaim? */
  57. int may_swap;
  58. /* This context's SWAP_CLUSTER_MAX. If freeing memory for
  59. * suspend, we effectively ignore SWAP_CLUSTER_MAX.
  60. * In this context, it doesn't matter that we scan the
  61. * whole list at once. */
  62. int swap_cluster_max;
  63. int swappiness;
  64. int all_unreclaimable;
  65. int order;
  66. /* Which cgroup do we reclaim from */
  67. struct mem_cgroup *mem_cgroup;
  68. /*
  69. * Nodemask of nodes allowed by the caller. If NULL, all nodes
  70. * are scanned.
  71. */
  72. nodemask_t *nodemask;
  73. /* Pluggable isolate pages callback */
  74. unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
  75. unsigned long *scanned, int order, int mode,
  76. struct zone *z, struct mem_cgroup *mem_cont,
  77. int active, int file);
  78. };
  79. #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
  80. #ifdef ARCH_HAS_PREFETCH
  81. #define prefetch_prev_lru_page(_page, _base, _field) \
  82. do { \
  83. if ((_page)->lru.prev != _base) { \
  84. struct page *prev; \
  85. \
  86. prev = lru_to_page(&(_page->lru)); \
  87. prefetch(&prev->_field); \
  88. } \
  89. } while (0)
  90. #else
  91. #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
  92. #endif
  93. #ifdef ARCH_HAS_PREFETCHW
  94. #define prefetchw_prev_lru_page(_page, _base, _field) \
  95. do { \
  96. if ((_page)->lru.prev != _base) { \
  97. struct page *prev; \
  98. \
  99. prev = lru_to_page(&(_page->lru)); \
  100. prefetchw(&prev->_field); \
  101. } \
  102. } while (0)
  103. #else
  104. #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
  105. #endif
  106. /*
  107. * From 0 .. 100. Higher means more swappy.
  108. */
  109. int vm_swappiness = 60;
  110. long vm_total_pages; /* The total number of pages which the VM controls */
  111. static LIST_HEAD(shrinker_list);
  112. static DECLARE_RWSEM(shrinker_rwsem);
  113. #ifdef CONFIG_CGROUP_MEM_RES_CTLR
  114. #define scanning_global_lru(sc) (!(sc)->mem_cgroup)
  115. #else
  116. #define scanning_global_lru(sc) (1)
  117. #endif
  118. static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
  119. struct scan_control *sc)
  120. {
  121. if (!scanning_global_lru(sc))
  122. return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
  123. return &zone->reclaim_stat;
  124. }
  125. static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
  126. enum lru_list lru)
  127. {
  128. if (!scanning_global_lru(sc))
  129. return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
  130. return zone_page_state(zone, NR_LRU_BASE + lru);
  131. }
  132. /*
  133. * Add a shrinker callback to be called from the vm
  134. */
  135. void register_shrinker(struct shrinker *shrinker)
  136. {
  137. shrinker->nr = 0;
  138. down_write(&shrinker_rwsem);
  139. list_add_tail(&shrinker->list, &shrinker_list);
  140. up_write(&shrinker_rwsem);
  141. }
  142. EXPORT_SYMBOL(register_shrinker);
  143. /*
  144. * Remove one
  145. */
  146. void unregister_shrinker(struct shrinker *shrinker)
  147. {
  148. down_write(&shrinker_rwsem);
  149. list_del(&shrinker->list);
  150. up_write(&shrinker_rwsem);
  151. }
  152. EXPORT_SYMBOL(unregister_shrinker);
  153. #define SHRINK_BATCH 128
  154. /*
  155. * Call the shrink functions to age shrinkable caches
  156. *
  157. * Here we assume it costs one seek to replace a lru page and that it also
  158. * takes a seek to recreate a cache object. With this in mind we age equal
  159. * percentages of the lru and ageable caches. This should balance the seeks
  160. * generated by these structures.
  161. *
  162. * If the vm encountered mapped pages on the LRU it increase the pressure on
  163. * slab to avoid swapping.
  164. *
  165. * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
  166. *
  167. * `lru_pages' represents the number of on-LRU pages in all the zones which
  168. * are eligible for the caller's allocation attempt. It is used for balancing
  169. * slab reclaim versus page reclaim.
  170. *
  171. * Returns the number of slab objects which we shrunk.
  172. */
  173. unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
  174. unsigned long lru_pages)
  175. {
  176. struct shrinker *shrinker;
  177. unsigned long ret = 0;
  178. if (scanned == 0)
  179. scanned = SWAP_CLUSTER_MAX;
  180. if (!down_read_trylock(&shrinker_rwsem))
  181. return 1; /* Assume we'll be able to shrink next time */
  182. list_for_each_entry(shrinker, &shrinker_list, list) {
  183. unsigned long long delta;
  184. unsigned long total_scan;
  185. unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
  186. delta = (4 * scanned) / shrinker->seeks;
  187. delta *= max_pass;
  188. do_div(delta, lru_pages + 1);
  189. shrinker->nr += delta;
  190. if (shrinker->nr < 0) {
  191. printk(KERN_ERR "shrink_slab: %pF negative objects to "
  192. "delete nr=%ld\n",
  193. shrinker->shrink, shrinker->nr);
  194. shrinker->nr = max_pass;
  195. }
  196. /*
  197. * Avoid risking looping forever due to too large nr value:
  198. * never try to free more than twice the estimate number of
  199. * freeable entries.
  200. */
  201. if (shrinker->nr > max_pass * 2)
  202. shrinker->nr = max_pass * 2;
  203. total_scan = shrinker->nr;
  204. shrinker->nr = 0;
  205. while (total_scan >= SHRINK_BATCH) {
  206. long this_scan = SHRINK_BATCH;
  207. int shrink_ret;
  208. int nr_before;
  209. nr_before = (*shrinker->shrink)(0, gfp_mask);
  210. shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
  211. if (shrink_ret == -1)
  212. break;
  213. if (shrink_ret < nr_before)
  214. ret += nr_before - shrink_ret;
  215. count_vm_events(SLABS_SCANNED, this_scan);
  216. total_scan -= this_scan;
  217. cond_resched();
  218. }
  219. shrinker->nr += total_scan;
  220. }
  221. up_read(&shrinker_rwsem);
  222. return ret;
  223. }
  224. /* Called without lock on whether page is mapped, so answer is unstable */
  225. static inline int page_mapping_inuse(struct page *page)
  226. {
  227. struct address_space *mapping;
  228. /* Page is in somebody's page tables. */
  229. if (page_mapped(page))
  230. return 1;
  231. /* Be more reluctant to reclaim swapcache than pagecache */
  232. if (PageSwapCache(page))
  233. return 1;
  234. mapping = page_mapping(page);
  235. if (!mapping)
  236. return 0;
  237. /* File is mmap'd by somebody? */
  238. return mapping_mapped(mapping);
  239. }
  240. static inline int is_page_cache_freeable(struct page *page)
  241. {
  242. return page_count(page) - !!page_has_private(page) == 2;
  243. }
  244. static int may_write_to_queue(struct backing_dev_info *bdi)
  245. {
  246. if (current->flags & PF_SWAPWRITE)
  247. return 1;
  248. if (!bdi_write_congested(bdi))
  249. return 1;
  250. if (bdi == current->backing_dev_info)
  251. return 1;
  252. return 0;
  253. }
  254. /*
  255. * We detected a synchronous write error writing a page out. Probably
  256. * -ENOSPC. We need to propagate that into the address_space for a subsequent
  257. * fsync(), msync() or close().
  258. *
  259. * The tricky part is that after writepage we cannot touch the mapping: nothing
  260. * prevents it from being freed up. But we have a ref on the page and once
  261. * that page is locked, the mapping is pinned.
  262. *
  263. * We're allowed to run sleeping lock_page() here because we know the caller has
  264. * __GFP_FS.
  265. */
  266. static void handle_write_error(struct address_space *mapping,
  267. struct page *page, int error)
  268. {
  269. lock_page(page);
  270. if (page_mapping(page) == mapping)
  271. mapping_set_error(mapping, error);
  272. unlock_page(page);
  273. }
  274. /* Request for sync pageout. */
  275. enum pageout_io {
  276. PAGEOUT_IO_ASYNC,
  277. PAGEOUT_IO_SYNC,
  278. };
  279. /* possible outcome of pageout() */
  280. typedef enum {
  281. /* failed to write page out, page is locked */
  282. PAGE_KEEP,
  283. /* move page to the active list, page is locked */
  284. PAGE_ACTIVATE,
  285. /* page has been sent to the disk successfully, page is unlocked */
  286. PAGE_SUCCESS,
  287. /* page is clean and locked */
  288. PAGE_CLEAN,
  289. } pageout_t;
  290. /*
  291. * pageout is called by shrink_page_list() for each dirty page.
  292. * Calls ->writepage().
  293. */
  294. static pageout_t pageout(struct page *page, struct address_space *mapping,
  295. enum pageout_io sync_writeback)
  296. {
  297. /*
  298. * If the page is dirty, only perform writeback if that write
  299. * will be non-blocking. To prevent this allocation from being
  300. * stalled by pagecache activity. But note that there may be
  301. * stalls if we need to run get_block(). We could test
  302. * PagePrivate for that.
  303. *
  304. * If this process is currently in generic_file_write() against
  305. * this page's queue, we can perform writeback even if that
  306. * will block.
  307. *
  308. * If the page is swapcache, write it back even if that would
  309. * block, for some throttling. This happens by accident, because
  310. * swap_backing_dev_info is bust: it doesn't reflect the
  311. * congestion state of the swapdevs. Easy to fix, if needed.
  312. * See swapfile.c:page_queue_congested().
  313. */
  314. if (!is_page_cache_freeable(page))
  315. return PAGE_KEEP;
  316. if (!mapping) {
  317. /*
  318. * Some data journaling orphaned pages can have
  319. * page->mapping == NULL while being dirty with clean buffers.
  320. */
  321. if (page_has_private(page)) {
  322. if (try_to_free_buffers(page)) {
  323. ClearPageDirty(page);
  324. printk("%s: orphaned page\n", __func__);
  325. return PAGE_CLEAN;
  326. }
  327. }
  328. return PAGE_KEEP;
  329. }
  330. if (mapping->a_ops->writepage == NULL)
  331. return PAGE_ACTIVATE;
  332. if (!may_write_to_queue(mapping->backing_dev_info))
  333. return PAGE_KEEP;
  334. if (clear_page_dirty_for_io(page)) {
  335. int res;
  336. struct writeback_control wbc = {
  337. .sync_mode = WB_SYNC_NONE,
  338. .nr_to_write = SWAP_CLUSTER_MAX,
  339. .range_start = 0,
  340. .range_end = LLONG_MAX,
  341. .nonblocking = 1,
  342. .for_reclaim = 1,
  343. };
  344. SetPageReclaim(page);
  345. res = mapping->a_ops->writepage(page, &wbc);
  346. if (res < 0)
  347. handle_write_error(mapping, page, res);
  348. if (res == AOP_WRITEPAGE_ACTIVATE) {
  349. ClearPageReclaim(page);
  350. return PAGE_ACTIVATE;
  351. }
  352. /*
  353. * Wait on writeback if requested to. This happens when
  354. * direct reclaiming a large contiguous area and the
  355. * first attempt to free a range of pages fails.
  356. */
  357. if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
  358. wait_on_page_writeback(page);
  359. if (!PageWriteback(page)) {
  360. /* synchronous write or broken a_ops? */
  361. ClearPageReclaim(page);
  362. }
  363. inc_zone_page_state(page, NR_VMSCAN_WRITE);
  364. return PAGE_SUCCESS;
  365. }
  366. return PAGE_CLEAN;
  367. }
  368. /*
  369. * Same as remove_mapping, but if the page is removed from the mapping, it
  370. * gets returned with a refcount of 0.
  371. */
  372. static int __remove_mapping(struct address_space *mapping, struct page *page)
  373. {
  374. BUG_ON(!PageLocked(page));
  375. BUG_ON(mapping != page_mapping(page));
  376. spin_lock_irq(&mapping->tree_lock);
  377. /*
  378. * The non racy check for a busy page.
  379. *
  380. * Must be careful with the order of the tests. When someone has
  381. * a ref to the page, it may be possible that they dirty it then
  382. * drop the reference. So if PageDirty is tested before page_count
  383. * here, then the following race may occur:
  384. *
  385. * get_user_pages(&page);
  386. * [user mapping goes away]
  387. * write_to(page);
  388. * !PageDirty(page) [good]
  389. * SetPageDirty(page);
  390. * put_page(page);
  391. * !page_count(page) [good, discard it]
  392. *
  393. * [oops, our write_to data is lost]
  394. *
  395. * Reversing the order of the tests ensures such a situation cannot
  396. * escape unnoticed. The smp_rmb is needed to ensure the page->flags
  397. * load is not satisfied before that of page->_count.
  398. *
  399. * Note that if SetPageDirty is always performed via set_page_dirty,
  400. * and thus under tree_lock, then this ordering is not required.
  401. */
  402. if (!page_freeze_refs(page, 2))
  403. goto cannot_free;
  404. /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
  405. if (unlikely(PageDirty(page))) {
  406. page_unfreeze_refs(page, 2);
  407. goto cannot_free;
  408. }
  409. if (PageSwapCache(page)) {
  410. swp_entry_t swap = { .val = page_private(page) };
  411. __delete_from_swap_cache(page);
  412. spin_unlock_irq(&mapping->tree_lock);
  413. swapcache_free(swap, page);
  414. } else {
  415. __remove_from_page_cache(page);
  416. spin_unlock_irq(&mapping->tree_lock);
  417. mem_cgroup_uncharge_cache_page(page);
  418. }
  419. return 1;
  420. cannot_free:
  421. spin_unlock_irq(&mapping->tree_lock);
  422. return 0;
  423. }
  424. /*
  425. * Attempt to detach a locked page from its ->mapping. If it is dirty or if
  426. * someone else has a ref on the page, abort and return 0. If it was
  427. * successfully detached, return 1. Assumes the caller has a single ref on
  428. * this page.
  429. */
  430. int remove_mapping(struct address_space *mapping, struct page *page)
  431. {
  432. if (__remove_mapping(mapping, page)) {
  433. /*
  434. * Unfreezing the refcount with 1 rather than 2 effectively
  435. * drops the pagecache ref for us without requiring another
  436. * atomic operation.
  437. */
  438. page_unfreeze_refs(page, 1);
  439. return 1;
  440. }
  441. return 0;
  442. }
  443. /**
  444. * putback_lru_page - put previously isolated page onto appropriate LRU list
  445. * @page: page to be put back to appropriate lru list
  446. *
  447. * Add previously isolated @page to appropriate LRU list.
  448. * Page may still be unevictable for other reasons.
  449. *
  450. * lru_lock must not be held, interrupts must be enabled.
  451. */
  452. void putback_lru_page(struct page *page)
  453. {
  454. int lru;
  455. int active = !!TestClearPageActive(page);
  456. int was_unevictable = PageUnevictable(page);
  457. VM_BUG_ON(PageLRU(page));
  458. redo:
  459. ClearPageUnevictable(page);
  460. if (page_evictable(page, NULL)) {
  461. /*
  462. * For evictable pages, we can use the cache.
  463. * In event of a race, worst case is we end up with an
  464. * unevictable page on [in]active list.
  465. * We know how to handle that.
  466. */
  467. lru = active + page_is_file_cache(page);
  468. lru_cache_add_lru(page, lru);
  469. } else {
  470. /*
  471. * Put unevictable pages directly on zone's unevictable
  472. * list.
  473. */
  474. lru = LRU_UNEVICTABLE;
  475. add_page_to_unevictable_list(page);
  476. }
  477. /*
  478. * page's status can change while we move it among lru. If an evictable
  479. * page is on unevictable list, it never be freed. To avoid that,
  480. * check after we added it to the list, again.
  481. */
  482. if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
  483. if (!isolate_lru_page(page)) {
  484. put_page(page);
  485. goto redo;
  486. }
  487. /* This means someone else dropped this page from LRU
  488. * So, it will be freed or putback to LRU again. There is
  489. * nothing to do here.
  490. */
  491. }
  492. if (was_unevictable && lru != LRU_UNEVICTABLE)
  493. count_vm_event(UNEVICTABLE_PGRESCUED);
  494. else if (!was_unevictable && lru == LRU_UNEVICTABLE)
  495. count_vm_event(UNEVICTABLE_PGCULLED);
  496. put_page(page); /* drop ref from isolate */
  497. }
  498. /*
  499. * shrink_page_list() returns the number of reclaimed pages
  500. */
  501. static unsigned long shrink_page_list(struct list_head *page_list,
  502. struct scan_control *sc,
  503. enum pageout_io sync_writeback)
  504. {
  505. LIST_HEAD(ret_pages);
  506. struct pagevec freed_pvec;
  507. int pgactivate = 0;
  508. unsigned long nr_reclaimed = 0;
  509. unsigned long vm_flags;
  510. cond_resched();
  511. pagevec_init(&freed_pvec, 1);
  512. while (!list_empty(page_list)) {
  513. struct address_space *mapping;
  514. struct page *page;
  515. int may_enter_fs;
  516. int referenced;
  517. cond_resched();
  518. page = lru_to_page(page_list);
  519. list_del(&page->lru);
  520. if (!trylock_page(page))
  521. goto keep;
  522. VM_BUG_ON(PageActive(page));
  523. sc->nr_scanned++;
  524. if (unlikely(!page_evictable(page, NULL)))
  525. goto cull_mlocked;
  526. if (!sc->may_unmap && page_mapped(page))
  527. goto keep_locked;
  528. /* Double the slab pressure for mapped and swapcache pages */
  529. if (page_mapped(page) || PageSwapCache(page))
  530. sc->nr_scanned++;
  531. may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
  532. (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
  533. if (PageWriteback(page)) {
  534. /*
  535. * Synchronous reclaim is performed in two passes,
  536. * first an asynchronous pass over the list to
  537. * start parallel writeback, and a second synchronous
  538. * pass to wait for the IO to complete. Wait here
  539. * for any page for which writeback has already
  540. * started.
  541. */
  542. if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
  543. wait_on_page_writeback(page);
  544. else
  545. goto keep_locked;
  546. }
  547. referenced = page_referenced(page, 1,
  548. sc->mem_cgroup, &vm_flags);
  549. /* In active use or really unfreeable? Activate it. */
  550. if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
  551. referenced && page_mapping_inuse(page))
  552. goto activate_locked;
  553. /*
  554. * Anonymous process memory has backing store?
  555. * Try to allocate it some swap space here.
  556. */
  557. if (PageAnon(page) && !PageSwapCache(page)) {
  558. if (!(sc->gfp_mask & __GFP_IO))
  559. goto keep_locked;
  560. if (!add_to_swap(page))
  561. goto activate_locked;
  562. may_enter_fs = 1;
  563. }
  564. mapping = page_mapping(page);
  565. /*
  566. * The page is mapped into the page tables of one or more
  567. * processes. Try to unmap it here.
  568. */
  569. if (page_mapped(page) && mapping) {
  570. switch (try_to_unmap(page, 0)) {
  571. case SWAP_FAIL:
  572. goto activate_locked;
  573. case SWAP_AGAIN:
  574. goto keep_locked;
  575. case SWAP_MLOCK:
  576. goto cull_mlocked;
  577. case SWAP_SUCCESS:
  578. ; /* try to free the page below */
  579. }
  580. }
  581. if (PageDirty(page)) {
  582. if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
  583. goto keep_locked;
  584. if (!may_enter_fs)
  585. goto keep_locked;
  586. if (!sc->may_writepage)
  587. goto keep_locked;
  588. /* Page is dirty, try to write it out here */
  589. switch (pageout(page, mapping, sync_writeback)) {
  590. case PAGE_KEEP:
  591. goto keep_locked;
  592. case PAGE_ACTIVATE:
  593. goto activate_locked;
  594. case PAGE_SUCCESS:
  595. if (PageWriteback(page) || PageDirty(page))
  596. goto keep;
  597. /*
  598. * A synchronous write - probably a ramdisk. Go
  599. * ahead and try to reclaim the page.
  600. */
  601. if (!trylock_page(page))
  602. goto keep;
  603. if (PageDirty(page) || PageWriteback(page))
  604. goto keep_locked;
  605. mapping = page_mapping(page);
  606. case PAGE_CLEAN:
  607. ; /* try to free the page below */
  608. }
  609. }
  610. /*
  611. * If the page has buffers, try to free the buffer mappings
  612. * associated with this page. If we succeed we try to free
  613. * the page as well.
  614. *
  615. * We do this even if the page is PageDirty().
  616. * try_to_release_page() does not perform I/O, but it is
  617. * possible for a page to have PageDirty set, but it is actually
  618. * clean (all its buffers are clean). This happens if the
  619. * buffers were written out directly, with submit_bh(). ext3
  620. * will do this, as well as the blockdev mapping.
  621. * try_to_release_page() will discover that cleanness and will
  622. * drop the buffers and mark the page clean - it can be freed.
  623. *
  624. * Rarely, pages can have buffers and no ->mapping. These are
  625. * the pages which were not successfully invalidated in
  626. * truncate_complete_page(). We try to drop those buffers here
  627. * and if that worked, and the page is no longer mapped into
  628. * process address space (page_count == 1) it can be freed.
  629. * Otherwise, leave the page on the LRU so it is swappable.
  630. */
  631. if (page_has_private(page)) {
  632. if (!try_to_release_page(page, sc->gfp_mask))
  633. goto activate_locked;
  634. if (!mapping && page_count(page) == 1) {
  635. unlock_page(page);
  636. if (put_page_testzero(page))
  637. goto free_it;
  638. else {
  639. /*
  640. * rare race with speculative reference.
  641. * the speculative reference will free
  642. * this page shortly, so we may
  643. * increment nr_reclaimed here (and
  644. * leave it off the LRU).
  645. */
  646. nr_reclaimed++;
  647. continue;
  648. }
  649. }
  650. }
  651. if (!mapping || !__remove_mapping(mapping, page))
  652. goto keep_locked;
  653. /*
  654. * At this point, we have no other references and there is
  655. * no way to pick any more up (removed from LRU, removed
  656. * from pagecache). Can use non-atomic bitops now (and
  657. * we obviously don't have to worry about waking up a process
  658. * waiting on the page lock, because there are no references.
  659. */
  660. __clear_page_locked(page);
  661. free_it:
  662. nr_reclaimed++;
  663. if (!pagevec_add(&freed_pvec, page)) {
  664. __pagevec_free(&freed_pvec);
  665. pagevec_reinit(&freed_pvec);
  666. }
  667. continue;
  668. cull_mlocked:
  669. if (PageSwapCache(page))
  670. try_to_free_swap(page);
  671. unlock_page(page);
  672. putback_lru_page(page);
  673. continue;
  674. activate_locked:
  675. /* Not a candidate for swapping, so reclaim swap space. */
  676. if (PageSwapCache(page) && vm_swap_full())
  677. try_to_free_swap(page);
  678. VM_BUG_ON(PageActive(page));
  679. SetPageActive(page);
  680. pgactivate++;
  681. keep_locked:
  682. unlock_page(page);
  683. keep:
  684. list_add(&page->lru, &ret_pages);
  685. VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
  686. }
  687. list_splice(&ret_pages, page_list);
  688. if (pagevec_count(&freed_pvec))
  689. __pagevec_free(&freed_pvec);
  690. count_vm_events(PGACTIVATE, pgactivate);
  691. return nr_reclaimed;
  692. }
  693. /* LRU Isolation modes. */
  694. #define ISOLATE_INACTIVE 0 /* Isolate inactive pages. */
  695. #define ISOLATE_ACTIVE 1 /* Isolate active pages. */
  696. #define ISOLATE_BOTH 2 /* Isolate both active and inactive pages. */
  697. /*
  698. * Attempt to remove the specified page from its LRU. Only take this page
  699. * if it is of the appropriate PageActive status. Pages which are being
  700. * freed elsewhere are also ignored.
  701. *
  702. * page: page to consider
  703. * mode: one of the LRU isolation modes defined above
  704. *
  705. * returns 0 on success, -ve errno on failure.
  706. */
  707. int __isolate_lru_page(struct page *page, int mode, int file)
  708. {
  709. int ret = -EINVAL;
  710. /* Only take pages on the LRU. */
  711. if (!PageLRU(page))
  712. return ret;
  713. /*
  714. * When checking the active state, we need to be sure we are
  715. * dealing with comparible boolean values. Take the logical not
  716. * of each.
  717. */
  718. if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
  719. return ret;
  720. if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
  721. return ret;
  722. /*
  723. * When this function is being called for lumpy reclaim, we
  724. * initially look into all LRU pages, active, inactive and
  725. * unevictable; only give shrink_page_list evictable pages.
  726. */
  727. if (PageUnevictable(page))
  728. return ret;
  729. ret = -EBUSY;
  730. if (likely(get_page_unless_zero(page))) {
  731. /*
  732. * Be careful not to clear PageLRU until after we're
  733. * sure the page is not being freed elsewhere -- the
  734. * page release code relies on it.
  735. */
  736. ClearPageLRU(page);
  737. ret = 0;
  738. }
  739. return ret;
  740. }
  741. /*
  742. * zone->lru_lock is heavily contended. Some of the functions that
  743. * shrink the lists perform better by taking out a batch of pages
  744. * and working on them outside the LRU lock.
  745. *
  746. * For pagecache intensive workloads, this function is the hottest
  747. * spot in the kernel (apart from copy_*_user functions).
  748. *
  749. * Appropriate locks must be held before calling this function.
  750. *
  751. * @nr_to_scan: The number of pages to look through on the list.
  752. * @src: The LRU list to pull pages off.
  753. * @dst: The temp list to put pages on to.
  754. * @scanned: The number of pages that were scanned.
  755. * @order: The caller's attempted allocation order
  756. * @mode: One of the LRU isolation modes
  757. * @file: True [1] if isolating file [!anon] pages
  758. *
  759. * returns how many pages were moved onto *@dst.
  760. */
  761. static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
  762. struct list_head *src, struct list_head *dst,
  763. unsigned long *scanned, int order, int mode, int file)
  764. {
  765. unsigned long nr_taken = 0;
  766. unsigned long scan;
  767. for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
  768. struct page *page;
  769. unsigned long pfn;
  770. unsigned long end_pfn;
  771. unsigned long page_pfn;
  772. int zone_id;
  773. page = lru_to_page(src);
  774. prefetchw_prev_lru_page(page, src, flags);
  775. VM_BUG_ON(!PageLRU(page));
  776. switch (__isolate_lru_page(page, mode, file)) {
  777. case 0:
  778. list_move(&page->lru, dst);
  779. mem_cgroup_del_lru(page);
  780. nr_taken++;
  781. break;
  782. case -EBUSY:
  783. /* else it is being freed elsewhere */
  784. list_move(&page->lru, src);
  785. mem_cgroup_rotate_lru_list(page, page_lru(page));
  786. continue;
  787. default:
  788. BUG();
  789. }
  790. if (!order)
  791. continue;
  792. /*
  793. * Attempt to take all pages in the order aligned region
  794. * surrounding the tag page. Only take those pages of
  795. * the same active state as that tag page. We may safely
  796. * round the target page pfn down to the requested order
  797. * as the mem_map is guarenteed valid out to MAX_ORDER,
  798. * where that page is in a different zone we will detect
  799. * it from its zone id and abort this block scan.
  800. */
  801. zone_id = page_zone_id(page);
  802. page_pfn = page_to_pfn(page);
  803. pfn = page_pfn & ~((1 << order) - 1);
  804. end_pfn = pfn + (1 << order);
  805. for (; pfn < end_pfn; pfn++) {
  806. struct page *cursor_page;
  807. /* The target page is in the block, ignore it. */
  808. if (unlikely(pfn == page_pfn))
  809. continue;
  810. /* Avoid holes within the zone. */
  811. if (unlikely(!pfn_valid_within(pfn)))
  812. break;
  813. cursor_page = pfn_to_page(pfn);
  814. /* Check that we have not crossed a zone boundary. */
  815. if (unlikely(page_zone_id(cursor_page) != zone_id))
  816. continue;
  817. if (__isolate_lru_page(cursor_page, mode, file) == 0) {
  818. list_move(&cursor_page->lru, dst);
  819. mem_cgroup_del_lru(cursor_page);
  820. nr_taken++;
  821. scan++;
  822. }
  823. }
  824. }
  825. *scanned = scan;
  826. return nr_taken;
  827. }
  828. static unsigned long isolate_pages_global(unsigned long nr,
  829. struct list_head *dst,
  830. unsigned long *scanned, int order,
  831. int mode, struct zone *z,
  832. struct mem_cgroup *mem_cont,
  833. int active, int file)
  834. {
  835. int lru = LRU_BASE;
  836. if (active)
  837. lru += LRU_ACTIVE;
  838. if (file)
  839. lru += LRU_FILE;
  840. return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
  841. mode, !!file);
  842. }
  843. /*
  844. * clear_active_flags() is a helper for shrink_active_list(), clearing
  845. * any active bits from the pages in the list.
  846. */
  847. static unsigned long clear_active_flags(struct list_head *page_list,
  848. unsigned int *count)
  849. {
  850. int nr_active = 0;
  851. int lru;
  852. struct page *page;
  853. list_for_each_entry(page, page_list, lru) {
  854. lru = page_is_file_cache(page);
  855. if (PageActive(page)) {
  856. lru += LRU_ACTIVE;
  857. ClearPageActive(page);
  858. nr_active++;
  859. }
  860. count[lru]++;
  861. }
  862. return nr_active;
  863. }
  864. /**
  865. * isolate_lru_page - tries to isolate a page from its LRU list
  866. * @page: page to isolate from its LRU list
  867. *
  868. * Isolates a @page from an LRU list, clears PageLRU and adjusts the
  869. * vmstat statistic corresponding to whatever LRU list the page was on.
  870. *
  871. * Returns 0 if the page was removed from an LRU list.
  872. * Returns -EBUSY if the page was not on an LRU list.
  873. *
  874. * The returned page will have PageLRU() cleared. If it was found on
  875. * the active list, it will have PageActive set. If it was found on
  876. * the unevictable list, it will have the PageUnevictable bit set. That flag
  877. * may need to be cleared by the caller before letting the page go.
  878. *
  879. * The vmstat statistic corresponding to the list on which the page was
  880. * found will be decremented.
  881. *
  882. * Restrictions:
  883. * (1) Must be called with an elevated refcount on the page. This is a
  884. * fundamentnal difference from isolate_lru_pages (which is called
  885. * without a stable reference).
  886. * (2) the lru_lock must not be held.
  887. * (3) interrupts must be enabled.
  888. */
  889. int isolate_lru_page(struct page *page)
  890. {
  891. int ret = -EBUSY;
  892. if (PageLRU(page)) {
  893. struct zone *zone = page_zone(page);
  894. spin_lock_irq(&zone->lru_lock);
  895. if (PageLRU(page) && get_page_unless_zero(page)) {
  896. int lru = page_lru(page);
  897. ret = 0;
  898. ClearPageLRU(page);
  899. del_page_from_lru_list(zone, page, lru);
  900. }
  901. spin_unlock_irq(&zone->lru_lock);
  902. }
  903. return ret;
  904. }
  905. /*
  906. * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
  907. * of reclaimed pages
  908. */
  909. static unsigned long shrink_inactive_list(unsigned long max_scan,
  910. struct zone *zone, struct scan_control *sc,
  911. int priority, int file)
  912. {
  913. LIST_HEAD(page_list);
  914. struct pagevec pvec;
  915. unsigned long nr_scanned = 0;
  916. unsigned long nr_reclaimed = 0;
  917. struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
  918. int lumpy_reclaim = 0;
  919. /*
  920. * If we need a large contiguous chunk of memory, or have
  921. * trouble getting a small set of contiguous pages, we
  922. * will reclaim both active and inactive pages.
  923. *
  924. * We use the same threshold as pageout congestion_wait below.
  925. */
  926. if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
  927. lumpy_reclaim = 1;
  928. else if (sc->order && priority < DEF_PRIORITY - 2)
  929. lumpy_reclaim = 1;
  930. pagevec_init(&pvec, 1);
  931. lru_add_drain();
  932. spin_lock_irq(&zone->lru_lock);
  933. do {
  934. struct page *page;
  935. unsigned long nr_taken;
  936. unsigned long nr_scan;
  937. unsigned long nr_freed;
  938. unsigned long nr_active;
  939. unsigned int count[NR_LRU_LISTS] = { 0, };
  940. int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
  941. nr_taken = sc->isolate_pages(sc->swap_cluster_max,
  942. &page_list, &nr_scan, sc->order, mode,
  943. zone, sc->mem_cgroup, 0, file);
  944. nr_active = clear_active_flags(&page_list, count);
  945. __count_vm_events(PGDEACTIVATE, nr_active);
  946. __mod_zone_page_state(zone, NR_ACTIVE_FILE,
  947. -count[LRU_ACTIVE_FILE]);
  948. __mod_zone_page_state(zone, NR_INACTIVE_FILE,
  949. -count[LRU_INACTIVE_FILE]);
  950. __mod_zone_page_state(zone, NR_ACTIVE_ANON,
  951. -count[LRU_ACTIVE_ANON]);
  952. __mod_zone_page_state(zone, NR_INACTIVE_ANON,
  953. -count[LRU_INACTIVE_ANON]);
  954. if (scanning_global_lru(sc))
  955. zone->pages_scanned += nr_scan;
  956. reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
  957. reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
  958. reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
  959. reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
  960. spin_unlock_irq(&zone->lru_lock);
  961. nr_scanned += nr_scan;
  962. nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
  963. /*
  964. * If we are direct reclaiming for contiguous pages and we do
  965. * not reclaim everything in the list, try again and wait
  966. * for IO to complete. This will stall high-order allocations
  967. * but that should be acceptable to the caller
  968. */
  969. if (nr_freed < nr_taken && !current_is_kswapd() &&
  970. lumpy_reclaim) {
  971. congestion_wait(BLK_RW_ASYNC, HZ/10);
  972. /*
  973. * The attempt at page out may have made some
  974. * of the pages active, mark them inactive again.
  975. */
  976. nr_active = clear_active_flags(&page_list, count);
  977. count_vm_events(PGDEACTIVATE, nr_active);
  978. nr_freed += shrink_page_list(&page_list, sc,
  979. PAGEOUT_IO_SYNC);
  980. }
  981. nr_reclaimed += nr_freed;
  982. local_irq_disable();
  983. if (current_is_kswapd()) {
  984. __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
  985. __count_vm_events(KSWAPD_STEAL, nr_freed);
  986. } else if (scanning_global_lru(sc))
  987. __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
  988. __count_zone_vm_events(PGSTEAL, zone, nr_freed);
  989. if (nr_taken == 0)
  990. goto done;
  991. spin_lock(&zone->lru_lock);
  992. /*
  993. * Put back any unfreeable pages.
  994. */
  995. while (!list_empty(&page_list)) {
  996. int lru;
  997. page = lru_to_page(&page_list);
  998. VM_BUG_ON(PageLRU(page));
  999. list_del(&page->lru);
  1000. if (unlikely(!page_evictable(page, NULL))) {
  1001. spin_unlock_irq(&zone->lru_lock);
  1002. putback_lru_page(page);
  1003. spin_lock_irq(&zone->lru_lock);
  1004. continue;
  1005. }
  1006. SetPageLRU(page);
  1007. lru = page_lru(page);
  1008. add_page_to_lru_list(zone, page, lru);
  1009. if (PageActive(page)) {
  1010. int file = !!page_is_file_cache(page);
  1011. reclaim_stat->recent_rotated[file]++;
  1012. }
  1013. if (!pagevec_add(&pvec, page)) {
  1014. spin_unlock_irq(&zone->lru_lock);
  1015. __pagevec_release(&pvec);
  1016. spin_lock_irq(&zone->lru_lock);
  1017. }
  1018. }
  1019. } while (nr_scanned < max_scan);
  1020. spin_unlock(&zone->lru_lock);
  1021. done:
  1022. local_irq_enable();
  1023. pagevec_release(&pvec);
  1024. return nr_reclaimed;
  1025. }
  1026. /*
  1027. * We are about to scan this zone at a certain priority level. If that priority
  1028. * level is smaller (ie: more urgent) than the previous priority, then note
  1029. * that priority level within the zone. This is done so that when the next
  1030. * process comes in to scan this zone, it will immediately start out at this
  1031. * priority level rather than having to build up its own scanning priority.
  1032. * Here, this priority affects only the reclaim-mapped threshold.
  1033. */
  1034. static inline void note_zone_scanning_priority(struct zone *zone, int priority)
  1035. {
  1036. if (priority < zone->prev_priority)
  1037. zone->prev_priority = priority;
  1038. }
  1039. /*
  1040. * This moves pages from the active list to the inactive list.
  1041. *
  1042. * We move them the other way if the page is referenced by one or more
  1043. * processes, from rmap.
  1044. *
  1045. * If the pages are mostly unmapped, the processing is fast and it is
  1046. * appropriate to hold zone->lru_lock across the whole operation. But if
  1047. * the pages are mapped, the processing is slow (page_referenced()) so we
  1048. * should drop zone->lru_lock around each page. It's impossible to balance
  1049. * this, so instead we remove the pages from the LRU while processing them.
  1050. * It is safe to rely on PG_active against the non-LRU pages in here because
  1051. * nobody will play with that bit on a non-LRU page.
  1052. *
  1053. * The downside is that we have to touch page->_count against each page.
  1054. * But we had to alter page->flags anyway.
  1055. */
  1056. static void move_active_pages_to_lru(struct zone *zone,
  1057. struct list_head *list,
  1058. enum lru_list lru)
  1059. {
  1060. unsigned long pgmoved = 0;
  1061. struct pagevec pvec;
  1062. struct page *page;
  1063. pagevec_init(&pvec, 1);
  1064. while (!list_empty(list)) {
  1065. page = lru_to_page(list);
  1066. prefetchw_prev_lru_page(page, list, flags);
  1067. VM_BUG_ON(PageLRU(page));
  1068. SetPageLRU(page);
  1069. VM_BUG_ON(!PageActive(page));
  1070. if (!is_active_lru(lru))
  1071. ClearPageActive(page); /* we are de-activating */
  1072. list_move(&page->lru, &zone->lru[lru].list);
  1073. mem_cgroup_add_lru_list(page, lru);
  1074. pgmoved++;
  1075. if (!pagevec_add(&pvec, page) || list_empty(list)) {
  1076. spin_unlock_irq(&zone->lru_lock);
  1077. if (buffer_heads_over_limit)
  1078. pagevec_strip(&pvec);
  1079. __pagevec_release(&pvec);
  1080. spin_lock_irq(&zone->lru_lock);
  1081. }
  1082. }
  1083. __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
  1084. if (!is_active_lru(lru))
  1085. __count_vm_events(PGDEACTIVATE, pgmoved);
  1086. }
  1087. static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
  1088. struct scan_control *sc, int priority, int file)
  1089. {
  1090. unsigned long pgmoved;
  1091. unsigned long pgscanned;
  1092. unsigned long vm_flags;
  1093. LIST_HEAD(l_hold); /* The pages which were snipped off */
  1094. LIST_HEAD(l_active);
  1095. LIST_HEAD(l_inactive);
  1096. struct page *page;
  1097. struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
  1098. lru_add_drain();
  1099. spin_lock_irq(&zone->lru_lock);
  1100. pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
  1101. ISOLATE_ACTIVE, zone,
  1102. sc->mem_cgroup, 1, file);
  1103. /*
  1104. * zone->pages_scanned is used for detect zone's oom
  1105. * mem_cgroup remembers nr_scan by itself.
  1106. */
  1107. if (scanning_global_lru(sc)) {
  1108. zone->pages_scanned += pgscanned;
  1109. }
  1110. reclaim_stat->recent_scanned[!!file] += pgmoved;
  1111. __count_zone_vm_events(PGREFILL, zone, pgscanned);
  1112. if (file)
  1113. __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
  1114. else
  1115. __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
  1116. spin_unlock_irq(&zone->lru_lock);
  1117. pgmoved = 0; /* count referenced (mapping) mapped pages */
  1118. while (!list_empty(&l_hold)) {
  1119. cond_resched();
  1120. page = lru_to_page(&l_hold);
  1121. list_del(&page->lru);
  1122. if (unlikely(!page_evictable(page, NULL))) {
  1123. putback_lru_page(page);
  1124. continue;
  1125. }
  1126. /* page_referenced clears PageReferenced */
  1127. if (page_mapping_inuse(page) &&
  1128. page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
  1129. pgmoved++;
  1130. /*
  1131. * Identify referenced, file-backed active pages and
  1132. * give them one more trip around the active list. So
  1133. * that executable code get better chances to stay in
  1134. * memory under moderate memory pressure. Anon pages
  1135. * are not likely to be evicted by use-once streaming
  1136. * IO, plus JVM can create lots of anon VM_EXEC pages,
  1137. * so we ignore them here.
  1138. */
  1139. if ((vm_flags & VM_EXEC) && !PageAnon(page)) {
  1140. list_add(&page->lru, &l_active);
  1141. continue;
  1142. }
  1143. }
  1144. list_add(&page->lru, &l_inactive);
  1145. }
  1146. /*
  1147. * Move pages back to the lru list.
  1148. */
  1149. spin_lock_irq(&zone->lru_lock);
  1150. /*
  1151. * Count referenced pages from currently used mappings as rotated,
  1152. * even though only some of them are actually re-activated. This
  1153. * helps balance scan pressure between file and anonymous pages in
  1154. * get_scan_ratio.
  1155. */
  1156. reclaim_stat->recent_rotated[!!file] += pgmoved;
  1157. move_active_pages_to_lru(zone, &l_active,
  1158. LRU_ACTIVE + file * LRU_FILE);
  1159. move_active_pages_to_lru(zone, &l_inactive,
  1160. LRU_BASE + file * LRU_FILE);
  1161. spin_unlock_irq(&zone->lru_lock);
  1162. }
  1163. static int inactive_anon_is_low_global(struct zone *zone)
  1164. {
  1165. unsigned long active, inactive;
  1166. active = zone_page_state(zone, NR_ACTIVE_ANON);
  1167. inactive = zone_page_state(zone, NR_INACTIVE_ANON);
  1168. if (inactive * zone->inactive_ratio < active)
  1169. return 1;
  1170. return 0;
  1171. }
  1172. /**
  1173. * inactive_anon_is_low - check if anonymous pages need to be deactivated
  1174. * @zone: zone to check
  1175. * @sc: scan control of this context
  1176. *
  1177. * Returns true if the zone does not have enough inactive anon pages,
  1178. * meaning some active anon pages need to be deactivated.
  1179. */
  1180. static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
  1181. {
  1182. int low;
  1183. if (scanning_global_lru(sc))
  1184. low = inactive_anon_is_low_global(zone);
  1185. else
  1186. low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
  1187. return low;
  1188. }
  1189. static int inactive_file_is_low_global(struct zone *zone)
  1190. {
  1191. unsigned long active, inactive;
  1192. active = zone_page_state(zone, NR_ACTIVE_FILE);
  1193. inactive = zone_page_state(zone, NR_INACTIVE_FILE);
  1194. return (active > inactive);
  1195. }
  1196. /**
  1197. * inactive_file_is_low - check if file pages need to be deactivated
  1198. * @zone: zone to check
  1199. * @sc: scan control of this context
  1200. *
  1201. * When the system is doing streaming IO, memory pressure here
  1202. * ensures that active file pages get deactivated, until more
  1203. * than half of the file pages are on the inactive list.
  1204. *
  1205. * Once we get to that situation, protect the system's working
  1206. * set from being evicted by disabling active file page aging.
  1207. *
  1208. * This uses a different ratio than the anonymous pages, because
  1209. * the page cache uses a use-once replacement algorithm.
  1210. */
  1211. static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
  1212. {
  1213. int low;
  1214. if (scanning_global_lru(sc))
  1215. low = inactive_file_is_low_global(zone);
  1216. else
  1217. low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
  1218. return low;
  1219. }
  1220. static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
  1221. struct zone *zone, struct scan_control *sc, int priority)
  1222. {
  1223. int file = is_file_lru(lru);
  1224. if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
  1225. shrink_active_list(nr_to_scan, zone, sc, priority, file);
  1226. return 0;
  1227. }
  1228. if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
  1229. shrink_active_list(nr_to_scan, zone, sc, priority, file);
  1230. return 0;
  1231. }
  1232. return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
  1233. }

/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative value of each set of LRU lists is determined
 * by looking at the fraction of the pages scanned we did rotate back
 * onto the active list instead of evict.
 *
 * percent[0] specifies how much pressure to put on ram/swap backed
 * memory, while percent[1] determines pressure on the file LRUs.
 */
static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
					unsigned long *percent)
{
	unsigned long anon, file, free;
	unsigned long anon_prio, file_prio;
	unsigned long ap, fp;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);

	anon  = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
		zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
	file  = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
		zone_nr_pages(zone, sc, LRU_INACTIVE_FILE);

	if (scanning_global_lru(sc)) {
		free  = zone_page_state(zone, NR_FREE_PAGES);
		/* If we have very few page cache pages,
		   force-scan anon pages. */
		if (unlikely(file + free <= high_wmark_pages(zone))) {
			percent[0] = 100;
			percent[1] = 0;
			return;
		}
	}

	/*
	 * OK, so we have swap space and a fair amount of page cache
	 * pages.  We use the recently rotated / recently scanned
	 * ratios to determine how valuable each cache is.
	 *
	 * Because workloads change over time (and to avoid overflow)
	 * we keep these statistics as a floating average, which ends
	 * up weighing recent references more than old ones.
	 *
	 * anon in [0], file in [1]
	 */
	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
		spin_lock_irq(&zone->lru_lock);
		reclaim_stat->recent_scanned[0] /= 2;
		reclaim_stat->recent_rotated[0] /= 2;
		spin_unlock_irq(&zone->lru_lock);
	}

	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
		spin_lock_irq(&zone->lru_lock);
		reclaim_stat->recent_scanned[1] /= 2;
		reclaim_stat->recent_rotated[1] /= 2;
		spin_unlock_irq(&zone->lru_lock);
	}

	/*
	 * With swappiness at 100, anonymous and file have the same priority.
	 * This scanning priority is essentially the inverse of IO cost.
	 */
	anon_prio = sc->swappiness;
	file_prio = 200 - sc->swappiness;

	/*
	 * The amount of pressure on anon vs file pages is inversely
	 * proportional to the fraction of recently scanned pages on
	 * each list that were recently referenced and in active use.
	 */
	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
	ap /= reclaim_stat->recent_rotated[0] + 1;

	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
	fp /= reclaim_stat->recent_rotated[1] + 1;

	/* Normalize to percentages */
	percent[0] = 100 * ap / (ap + fp + 1);
	percent[1] = 100 - percent[0];
}
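
/*
 * Rough worked example of the arithmetic above, with hypothetical numbers:
 * swappiness = 60 gives anon_prio = 60 and file_prio = 140.  If the zone
 * recently scanned 1000 pages of each type, rotating 500 anon pages but
 * only 100 file pages back onto the active lists, then
 *
 *	ap = 61 * 1001 / 501  ~= 121
 *	fp = 141 * 1001 / 101 ~= 1397
 *
 * so percent[0] = 100 * 121 / 1519 = 7 and percent[1] = 93: most of the
 * scanning pressure goes to the file LRUs, whose pages were re-referenced
 * far less often.
 */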

/*
 * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
 * until we have collected @swap_cluster_max pages to scan.
 */
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan,
				       unsigned long swap_cluster_max)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	if (nr >= swap_cluster_max)
		*nr_saved_scan = 0;
	else
		nr = 0;

	return nr;
}
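
/*
 * Illustration with hypothetical numbers: with swap_cluster_max = 32 and
 * repeated calls passing nr_to_scan = 10, *nr_saved_scan grows 10, 20, 30
 * (each call returning 0), and the fourth call sees 40 >= 32, returns 40
 * and resets the counter.  Small per-call targets are thus batched into
 * chunks worth isolating at once.
 */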

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void shrink_zone(int priority, struct zone *zone,
				struct scan_control *sc)
{
	unsigned long nr[NR_LRU_LISTS];
	unsigned long nr_to_scan;
	unsigned long percent[2];	/* anon @ 0; file @ 1 */
	enum lru_list l;
	unsigned long nr_reclaimed = sc->nr_reclaimed;
	unsigned long swap_cluster_max = sc->swap_cluster_max;
	int noswap = 0;

	/* If we have no swap space, do not bother scanning anon pages. */
	if (!sc->may_swap || (nr_swap_pages <= 0)) {
		noswap = 1;
		percent[0] = 0;
		percent[1] = 100;
	} else
		get_scan_ratio(zone, sc, percent);

	for_each_evictable_lru(l) {
		int file = is_file_lru(l);
		unsigned long scan;

		scan = zone_nr_pages(zone, sc, l);
		if (priority || noswap) {
			scan >>= priority;
			scan = (scan * percent[file]) / 100;
		}
		if (scanning_global_lru(sc))
			nr[l] = nr_scan_try_batch(scan,
						  &zone->lru[l].nr_saved_scan,
						  swap_cluster_max);
		else
			nr[l] = scan;
	}

	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
					nr[LRU_INACTIVE_FILE]) {
		for_each_evictable_lru(l) {
			if (nr[l]) {
				nr_to_scan = min(nr[l], swap_cluster_max);
				nr[l] -= nr_to_scan;

				nr_reclaimed += shrink_list(l, nr_to_scan,
							    zone, sc, priority);
			}
		}
		/*
		 * On large memory systems, scan >> priority can become
		 * really large. This is fine for the starting priority;
		 * we want to put equal scanning pressure on each zone.
		 * However, if the VM has a harder time of freeing pages,
		 * with multiple processes reclaiming pages, the total
		 * freeing target can get unreasonably large.
		 */
		if (nr_reclaimed > swap_cluster_max &&
			priority < DEF_PRIORITY && !current_is_kswapd())
			break;
	}

	sc->nr_reclaimed = nr_reclaimed;

	/*
	 * Even if we did not try to evict anon pages at all, we want to
	 * rebalance the anon lru active/inactive ratio.
	 */
	if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);

	throttle_vm_writeout(sc->gfp_mask);
}
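
/*
 * Rough worked example of the scan target computed above (hypothetical
 * numbers): a zone with 1,000,000 inactive file pages, percent[1] = 93 and
 * priority = 12 (DEF_PRIORITY) yields
 *
 *	scan = (1000000 >> 12) * 93 / 100 = 244 * 93 / 100 = 226
 *
 * pages per invocation; each drop in priority halves the shift and so
 * roughly doubles the pressure, until the zone meets its target or
 * priority reaches 0.
 */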

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
 * Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
 *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
 *    zone defense algorithm.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static void shrink_zones(int priority, struct zonelist *zonelist,
					struct scan_control *sc)
{
	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
	struct zoneref *z;
	struct zone *zone;

	sc->all_unreclaimable = 1;
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
					sc->nodemask) {
		if (!populated_zone(zone))
			continue;
		/*
		 * Note that memory controller reclaiming has only a small
		 * influence on the global LRU.
		 */
		if (scanning_global_lru(sc)) {
			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
				continue;
			note_zone_scanning_priority(zone, priority);

			if (zone_is_all_unreclaimable(zone) &&
						priority != DEF_PRIORITY)
				continue;	/* Let kswapd poll it */
			sc->all_unreclaimable = 0;
		} else {
			/*
			 * Ignore the cpuset limitation here.  We just want
			 * to reduce the number of pages used by this cgroup,
			 * regardless of a global memory shortage.
			 */
			sc->all_unreclaimable = 0;
			mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
							priority);
		}

		shrink_zone(priority, zone, sc);
	}
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 *
 * returns:	0, if no pages reclaimed
 *		else, the number of pages reclaimed
 */
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
					struct scan_control *sc)
{
	int priority;
	unsigned long ret = 0;
	unsigned long total_scanned = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	unsigned long lru_pages = 0;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);

	delayacct_freepages_start();

	if (scanning_global_lru(sc))
		count_vm_event(ALLOCSTALL);
	/*
	 * mem_cgroup will not do shrink_slab.
	 */
	if (scanning_global_lru(sc)) {
		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {

			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
				continue;

			lru_pages += zone_lru_pages(zone);
		}
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc->nr_scanned = 0;
		if (!priority)
			disable_swap_token();
		shrink_zones(priority, zonelist, sc);
		/*
		 * Don't shrink slabs when reclaiming memory from
		 * over limit cgroups
		 */
		if (scanning_global_lru(sc)) {
			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
			if (reclaim_state) {
				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
				reclaim_state->reclaimed_slab = 0;
			}
		}
		total_scanned += sc->nr_scanned;
		if (sc->nr_reclaimed >= sc->swap_cluster_max) {
			ret = sc->nr_reclaimed;
			goto out;
		}

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.   But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
		if (total_scanned > sc->swap_cluster_max +
					sc->swap_cluster_max / 2) {
			wakeup_pdflush(laptop_mode ? 0 : total_scanned);
			sc->may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
			congestion_wait(BLK_RW_ASYNC, HZ/10);
	}
	/* top priority shrink_zones still had more to do? don't OOM, then */
	if (!sc->all_unreclaimable && scanning_global_lru(sc))
		ret = sc->nr_reclaimed;
out:
	/*
	 * Now that we've scanned all the zones at this priority level, note
	 * that level within the zone so that the next thread which performs
	 * scanning of this zone will immediately start out at this priority
	 * level.  This affects only the decision whether or not to bring
	 * mapped pages onto the inactive list.
	 */
	if (priority < 0)
		priority = 0;

	if (scanning_global_lru(sc)) {
		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {

			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
				continue;

			zone->prev_priority = priority;
		}
	} else
		mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);

	delayacct_freepages_end();

	return ret;
}
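
/*
 * A small worked note on the writeback kick above: with the default
 * swap_cluster_max of SWAP_CLUSTER_MAX (32), pdflush is woken once
 * total_scanned exceeds 32 + 16 = 48 pages, and may_writepage is forced on
 * so that later passes are allowed to write dirty pages themselves.
 */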

unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	struct scan_control sc = {
		.gfp_mask = gfp_mask,
		.may_writepage = !laptop_mode,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.may_unmap = 1,
		.may_swap = 1,
		.swappiness = vm_swappiness,
		.order = order,
		.mem_cgroup = NULL,
		.isolate_pages = isolate_pages_global,
		.nodemask = nodemask,
	};

	return do_try_to_free_pages(zonelist, &sc);
}
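
/*
 * Illustrative caller-side usage (hypothetical variable names), as seen
 * from the page allocator's slow path when an allocation cannot be
 * satisfied from free pages:
 *
 *	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 *
 * The return value is the number of pages reclaimed; zero suggests the
 * caller may have to retry, wait, or eventually fall back to the OOM
 * killer.
 */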

#ifdef CONFIG_CGROUP_MEM_RES_CTLR

unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
					   gfp_t gfp_mask,
					   bool noswap,
					   unsigned int swappiness)
{
	struct scan_control sc = {
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = !noswap,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.swappiness = swappiness,
		.order = 0,
		.mem_cgroup = mem_cont,
		.isolate_pages = mem_cgroup_isolate_pages,
		.nodemask = NULL,	/* we don't care about placement */
	};
	struct zonelist *zonelist;

	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
	return do_try_to_free_pages(zonelist, &sc);
}
#endif

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at high_wmark_pages(zone).
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone
 * as dead and from now on, only perform a short scan.  Basically we're
 * polling the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and
 * the lower zones regardless of the number of free pages in the lower zones.
 * This interoperates with the page allocator fallback scheme to ensure that
 * aging of pages is balanced across the zones.
 */
static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
{
	int all_zones_ok;
	int priority;
	int i;
	unsigned long total_scanned;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_unmap = 1,
		.may_swap = 1,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.swappiness = vm_swappiness,
		.order = order,
		.mem_cgroup = NULL,
		.isolate_pages = isolate_pages_global,
	};
	/*
	 * temp_priority is used to remember the scanning priority at which
	 * this zone was successfully refilled to
	 * free_pages == high_wmark_pages(zone).
	 */
	int temp_priority[MAX_NR_ZONES];

loop_again:
	total_scanned = 0;
	sc.nr_reclaimed = 0;
	sc.may_writepage = !laptop_mode;
	count_vm_event(PAGEOUTRUN);

	for (i = 0; i < pgdat->nr_zones; i++)
		temp_priority[i] = DEF_PRIORITY;

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
		unsigned long lru_pages = 0;

		/* The swap token gets in the way of swapout... */
		if (!priority)
			disable_swap_token();

		all_zones_ok = 1;

		/*
		 * Scan in the highmem->dma direction for the highest
		 * zone which needs scanning
		 */
		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			if (zone_is_all_unreclaimable(zone) &&
			    priority != DEF_PRIORITY)
				continue;

			/*
			 * Do some background aging of the anon list, to give
			 * pages a chance to be referenced before reclaiming.
			 */
			if (inactive_anon_is_low(zone, &sc))
				shrink_active_list(SWAP_CLUSTER_MAX, zone,
							&sc, priority, 0);

			if (!zone_watermark_ok(zone, order,
					high_wmark_pages(zone), 0, 0)) {
				end_zone = i;
				break;
			}
		}
		if (i < 0)
			goto out;

		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			lru_pages += zone_lru_pages(zone);
		}

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_slab;

			if (!populated_zone(zone))
				continue;

			if (zone_is_all_unreclaimable(zone) &&
					priority != DEF_PRIORITY)
				continue;

			if (!zone_watermark_ok(zone, order,
					high_wmark_pages(zone), end_zone, 0))
				all_zones_ok = 0;
			temp_priority[i] = priority;
			sc.nr_scanned = 0;
			note_zone_scanning_priority(zone, priority);
			/*
			 * We put equal pressure on every zone, unless one
			 * zone has way too many pages free already.
			 */
			if (!zone_watermark_ok(zone, order,
					8*high_wmark_pages(zone), end_zone, 0))
				shrink_zone(priority, zone, &sc);
			reclaim_state->reclaimed_slab = 0;
			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
						lru_pages);
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			total_scanned += sc.nr_scanned;
			if (zone_is_all_unreclaimable(zone))
				continue;
			if (nr_slab == 0 && zone->pages_scanned >=
						(zone_lru_pages(zone) * 6))
				zone_set_flag(zone,
					      ZONE_ALL_UNRECLAIMABLE);
			/*
			 * If we've done a decent amount of scanning and
			 * the reclaim ratio is low, start doing writepage
			 * even in laptop mode
			 */
			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
				sc.may_writepage = 1;
		}
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators. It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
			break;
	}
out:
	/*
	 * Note within each zone the priority level at which this zone was
	 * brought into a happy state.  So that the next thread which scans
	 * this zone will start out at that priority level.
	 */
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = temp_priority[i];
	}
	if (!all_zones_ok) {
		cond_resched();

		try_to_freeze();

		/*
		 * Fragmentation may mean that the system cannot be
		 * rebalanced for high-order allocations in all zones.
		 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
		 * it means the zones have been fully scanned and are still
		 * not balanced.  For high-order allocations, there is
		 * little point trying all over again as kswapd may
		 * infinite loop.
		 *
		 * Instead, recheck all watermarks at order-0 as they
		 * are the most important.  If watermarks are ok, kswapd will go
		 * back to sleep.  High-order users can still perform direct
		 * reclaim if they wish.
		 */
		if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
			order = sc.order = 0;

		goto loop_again;
	}

	return sc.nr_reclaimed;
}
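
/*
 * Two of the heuristics above, illustrated with hypothetical numbers:
 *
 *  - A zone holding 100,000 LRU pages is declared ZONE_ALL_UNRECLAIMABLE
 *    once slab reclaim frees nothing (nr_slab == 0) and pages_scanned has
 *    reached 600,000, i.e. the equivalent of scanning every LRU page six
 *    times without useful progress.
 *
 *  - A zone whose free pages already exceed eight times its high watermark
 *    (say 8 * 4,000 = 32,000 free pages) is not passed to shrink_zone(),
 *    so kswapd puts no pressure on zones that are clearly not short of
 *    memory.
 */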

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned long order;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	lockdep_set_current_reclaim_state(GFP_KERNEL);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	order = 0;
	for ( ; ; ) {
		unsigned long new_order;

		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		new_order = pgdat->kswapd_max_order;
		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation
			 */
			order = new_order;
		} else {
			if (!freezing(current))
				schedule();

			order = pgdat->kswapd_max_order;
		}
		finish_wait(&pgdat->kswapd_wait, &wait);

		if (!try_to_freeze()) {
			/* We can speed up thawing tasks if we don't call
			 * balance_pgdat after returning from the refrigerator
			 */
			balance_pgdat(pgdat, order);
		}
	}
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	pgdat = zone->zone_pgdat;
	if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
		return;
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
		return;
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&pgdat->kswapd_wait);
}
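
/*
 * Illustrative usage from the allocator's point of view: when a zone misses
 * its low watermark during allocation, a call such as
 *
 *	wakeup_kswapd(zone, order);
 *
 * is made.  It is cheap and safe to call redundantly: it returns early when
 * the watermark is fine or when kswapd is already awake (no waiter on
 * kswapd_wait).
 */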

unsigned long global_lru_pages(void)
{
	return global_page_state(NR_ACTIVE_ANON)
		+ global_page_state(NR_ACTIVE_FILE)
		+ global_page_state(NR_INACTIVE_ANON)
		+ global_page_state(NR_INACTIVE_FILE);
}

#ifdef CONFIG_HIBERNATION
/*
 * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
 * from LRU lists system-wide, for given pass and priority.
 *
 * For pass > 3 we also try to shrink the LRU lists that contain a few pages
 */
static void shrink_all_zones(unsigned long nr_pages, int prio,
				      int pass, struct scan_control *sc)
{
	struct zone *zone;
	unsigned long nr_reclaimed = 0;

	for_each_populated_zone(zone) {
		enum lru_list l;

		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
			continue;

		for_each_evictable_lru(l) {
			enum zone_stat_item ls = NR_LRU_BASE + l;
			unsigned long lru_pages = zone_page_state(zone, ls);

			/* For pass = 0, we don't shrink the active list */
			if (pass == 0 && (l == LRU_ACTIVE_ANON ||
						l == LRU_ACTIVE_FILE))
				continue;

			zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1;
			if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) {
				unsigned long nr_to_scan;

				zone->lru[l].nr_saved_scan = 0;
				nr_to_scan = min(nr_pages, lru_pages);
				nr_reclaimed += shrink_list(l, nr_to_scan, zone,
								sc, prio);
				if (nr_reclaimed >= nr_pages) {
					sc->nr_reclaimed += nr_reclaimed;
					return;
				}
			}
		}
	}
	sc->nr_reclaimed += nr_reclaimed;
}
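
/*
 * Worked illustration with hypothetical numbers: asked for nr_pages = 1000
 * at prio = 5, a list holding 100,000 pages adds (100000 >> 5) + 1 = 3126
 * to nr_saved_scan, which immediately exceeds nr_pages, so up to
 * min(1000, 100000) = 1000 pages are scanned from that list this round.
 * On passes > 3 the accumulated threshold is ignored and even short lists
 * get scanned.
 */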

/*
 * Try to free `nr_pages' of memory, system-wide, and return the number of
 * freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_pages)
{
	unsigned long lru_pages, nr_slab;
	int pass;
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_unmap = 0,
		.may_writepage = 1,
		.isolate_pages = isolate_pages_global,
		.nr_reclaimed = 0,
	};

	current->reclaim_state = &reclaim_state;

	lru_pages = global_lru_pages();
	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
	/* If slab caches are huge, it's better to hit them first */
	while (nr_slab >= lru_pages) {
		reclaim_state.reclaimed_slab = 0;
		shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
		if (!reclaim_state.reclaimed_slab)
			break;

		sc.nr_reclaimed += reclaim_state.reclaimed_slab;
		if (sc.nr_reclaimed >= nr_pages)
			goto out;

		nr_slab -= reclaim_state.reclaimed_slab;
	}

	/*
	 * We try to shrink LRUs in 5 passes:
	 * 0 = Reclaim from inactive_list only
	 * 1 = Reclaim from active list but don't reclaim mapped
	 * 2 = 2nd pass of type 1
	 * 3 = Reclaim mapped (normal reclaim)
	 * 4 = 2nd pass of type 3
	 */
	for (pass = 0; pass < 5; pass++) {
		int prio;

		/* Force reclaiming mapped pages in the passes #3 and #4 */
		if (pass > 2)
			sc.may_unmap = 1;

		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
			unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;

			sc.nr_scanned = 0;
			sc.swap_cluster_max = nr_to_scan;
			shrink_all_zones(nr_to_scan, prio, pass, &sc);
			if (sc.nr_reclaimed >= nr_pages)
				goto out;

			reclaim_state.reclaimed_slab = 0;
			shrink_slab(sc.nr_scanned, sc.gfp_mask,
				    global_lru_pages());
			sc.nr_reclaimed += reclaim_state.reclaimed_slab;
			if (sc.nr_reclaimed >= nr_pages)
				goto out;

			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
				congestion_wait(BLK_RW_ASYNC, HZ / 10);
		}
	}

	/*
	 * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
	 * something in slab caches
	 */
	if (!sc.nr_reclaimed) {
		do {
			reclaim_state.reclaimed_slab = 0;
			shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
			sc.nr_reclaimed += reclaim_state.reclaimed_slab;
		} while (sc.nr_reclaimed < nr_pages &&
			 reclaim_state.reclaimed_slab > 0);
	}

out:
	current->reclaim_state = NULL;

	return sc.nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */

/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_HIGH_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			const struct cpumask *mask;

			mask = cpumask_of_node(pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}

/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state == SYSTEM_BOOTING);
		printk(KERN_ERR "Failed to start kswapd on node %d\n", nid);
		ret = -1;
	}
	return ret;
}

static int __init kswapd_init(void)
{
	int nid;

	swap_setup();
	for_each_node_state(nid, N_HIGH_MEMORY)
		kswapd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4

/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;
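
/*
 * These ratios are converted into per-zone page thresholds elsewhere (the
 * sysctl handlers compute zone->min_unmapped_pages and zone->min_slab_pages
 * as the given percentage of the zone's pages).  As a rough example with
 * hypothetical numbers, a zone of 1,000,000 pages with the defaults above
 * needs more than ~10,000 unmapped file pages before zone reclaim runs, and
 * more than ~50,000 reclaimable slab pages before slab reclaim is attempted.
 */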

static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
		zone_page_state(zone, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static long zone_pagecache_reclaimable(struct zone *zone)
{
	long nr_pagecache_reclaimable;
	long delta = 0;

	/*
	 * If RECLAIM_SWAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache and zone_unmapped_file_pages() provides
	 * a better estimate
	 */
	if (zone_reclaim_mode & RECLAIM_SWAP)
		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(zone_reclaim_mode & RECLAIM_WRITE))
		delta += zone_page_state(zone, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}
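
/*
 * Rough worked example (hypothetical numbers): a zone with 50,000
 * NR_FILE_PAGES, 30,000 file pages on the LRUs, 20,000 NR_FILE_MAPPED and
 * 5,000 NR_FILE_DIRTY.  With zone_reclaim_mode allowing neither swap nor
 * writeout, the estimate starts from the ~10,000 unmapped file LRU pages
 * and subtracts the 5,000 dirty ones, leaving about 5,000 reclaimable
 * page cache pages.
 */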

/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	int priority;
	struct scan_control sc = {
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.may_swap = 1,
		.swap_cluster_max = max_t(unsigned long, nr_pages,
					SWAP_CLUSTER_MAX),
		.gfp_mask = gfp_mask,
		.swappiness = vm_swappiness,
		.order = order,
		.isolate_pages = isolate_pages_global,
	};
	unsigned long slab_reclaimable;

	disable_swap_token();
	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink zone with increasing
		 * priorities until we have enough memory freed.
		 */
		priority = ZONE_RECLAIM_PRIORITY;
		do {
			note_zone_scanning_priority(zone, priority);
			shrink_zone(priority, zone, &sc);
			priority--;
		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
	}

	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	if (slab_reclaimable > zone->min_slab_pages) {
		/*
		 * shrink_slab() does not currently allow us to determine how
		 * many pages were freed in this zone. So we take the current
		 * number of slab pages and shake the slab until it is reduced
		 * by the same nr_pages that we used for reclaiming unmapped
		 * pages.
		 *
		 * Note that shrink_slab will free memory on all zones and may
		 * take a long time.
		 */
		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
				slab_reclaimable - nr_pages)
			;

		/*
		 * Update nr_reclaimed by the number of slab pages we
		 * reclaimed from this zone.
		 */
		sc.nr_reclaimed += slab_reclaimable -
			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
	return sc.nr_reclaimed >= nr_pages;
}
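
/*
 * The ZONE_RECLAIM_PRIORITY = 4 starting point above means the first
 * shrink_zone() pass looks at roughly lru_pages >> 4, i.e. 1/16th of the
 * zone's LRU pages (e.g. ~6,250 of 100,000, hypothetical numbers); each
 * retry lowers the priority, doubling the fraction scanned, until nr_pages
 * have been reclaimed or priority reaches 0.
 */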

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int node_id;
	int ret;

	/*
	 * Zone reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the zone is overallocated. So we do not reclaim
	 * if less than a specified percentage of the zone is used by
	 * unmapped file backed pages.
	 */
	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
		return ZONE_RECLAIM_FULL;

	if (zone_is_all_unreclaimable(zone))
		return ZONE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
		return ZONE_RECLAIM_NOSCAN;

	/*
	 * Only run zone reclaim on the local zone or on zones that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	node_id = zone_to_nid(zone);
	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
		return ZONE_RECLAIM_NOSCAN;

	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return ZONE_RECLAIM_NOSCAN;

	ret = __zone_reclaim(zone, gfp_mask, order);
	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
#endif

/*
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 * @vma: the VMA in which the page is or will be mapped, may be NULL
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.  The vma argument is !NULL when called from the
 * fault path to determine how to instantiate a new page.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
int page_evictable(struct page *page, struct vm_area_struct *vma)
{
	if (mapping_unevictable(page_mapping(page)))
		return 0;

	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
		return 0;

	return 1;
}

/**
 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
 * @page: page to check evictability and move to appropriate lru list
 * @zone: zone page is in
 *
 * Checks a page for evictability and moves the page to the appropriate
 * zone lru list.
 *
 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
 * have PageUnevictable set.
 */
static void check_move_unevictable_page(struct page *page, struct zone *zone)
{
	VM_BUG_ON(PageActive(page));

retry:
	ClearPageUnevictable(page);
	if (page_evictable(page, NULL)) {
		enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);

		__dec_zone_state(zone, NR_UNEVICTABLE);
		list_move(&page->lru, &zone->lru[l].list);
		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
		__count_vm_event(UNEVICTABLE_PGRESCUED);
	} else {
		/*
		 * rotate unevictable list
		 */
		SetPageUnevictable(page);
		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
		if (page_evictable(page, NULL))
			goto retry;
	}
}

/**
 * scan_mapping_unevictable_pages - scan an address space for evictable pages
 * @mapping: struct address_space to scan for evictable pages
 *
 * Scan all pages in mapping.  Check unevictable pages for
 * evictability and move them to the appropriate zone lru list.
 */
void scan_mapping_unevictable_pages(struct address_space *mapping)
{
	pgoff_t next = 0;
	pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
			 PAGE_CACHE_SHIFT;
	struct zone *zone;
	struct pagevec pvec;

	if (mapping->nrpages == 0)
		return;

	pagevec_init(&pvec, 0);
	while (next < end &&
		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		int i;
		int pg_scanned = 0;

		zone = NULL;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;
			struct zone *pagezone = page_zone(page);

			pg_scanned++;
			if (page_index > next)
				next = page_index;
			next++;

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irq(&zone->lru_lock);
				zone = pagezone;
				spin_lock_irq(&zone->lru_lock);
			}

			if (PageLRU(page) && PageUnevictable(page))
				check_move_unevictable_page(page, zone);
		}
		if (zone)
			spin_unlock_irq(&zone->lru_lock);
		pagevec_release(&pvec);

		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
	}
}

/**
 * scan_zone_unevictable_pages - check unevictable list for evictable pages
 * @zone: zone of which to scan the unevictable list
 *
 * Scan @zone's unevictable LRU lists to check for pages that have become
 * evictable.  Move those that have to @zone's inactive list where they
 * become candidates for reclaim, unless shrink_inactive_zone() decides
 * to reactivate them.  Pages that are still unevictable are rotated
 * back onto @zone's unevictable list.
 */
#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
static void scan_zone_unevictable_pages(struct zone *zone)
{
	struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
	unsigned long scan;
	unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);

	while (nr_to_scan > 0) {
		unsigned long batch_size = min(nr_to_scan,
						SCAN_UNEVICTABLE_BATCH_SIZE);

		spin_lock_irq(&zone->lru_lock);
		for (scan = 0; scan < batch_size; scan++) {
			struct page *page = lru_to_page(l_unevictable);

			if (!trylock_page(page))
				continue;

			prefetchw_prev_lru_page(page, l_unevictable, flags);

			if (likely(PageLRU(page) && PageUnevictable(page)))
				check_move_unevictable_page(page, zone);

			unlock_page(page);
		}
		spin_unlock_irq(&zone->lru_lock);

		nr_to_scan -= batch_size;
	}
}
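
/*
 * Batching illustration (hypothetical numbers): with 40 unevictable pages
 * in the zone, the loop above takes lru_lock three times for batches of
 * 16, 16 and 8 pages, rather than holding the lock across the whole list.
 */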

/**
 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
 *
 * A really big hammer:  scan all zones' unevictable LRU lists to check for
 * pages that have become evictable.  Move those back to the zones'
 * inactive list where they become candidates for reclaim.
 * This occurs when, e.g., we have unswappable pages on the unevictable lists,
 * and we add swap to the system.  As such, it runs in the context of a task
 * that has possibly/probably made some previously unevictable pages
 * evictable.
 */
static void scan_all_zones_unevictable_pages(void)
{
	struct zone *zone;

	for_each_zone(zone) {
		scan_zone_unevictable_pages(zone);
	}
}

/*
 * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
 * all nodes' unevictable lists for evictable pages
 */
unsigned long scan_unevictable_pages;

int scan_unevictable_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);

	if (write && *(unsigned long *)table->data)
		scan_all_zones_unevictable_pages();

	scan_unevictable_pages = 0;
	return 0;
}

/*
 * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
 * a specified node's per zone unevictable lists for evictable pages.
 */
static ssize_t read_scan_unevictable_node(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "0\n");	/* always zero; should fit... */
}

static ssize_t write_scan_unevictable_node(struct sys_device *dev,
					   struct sysdev_attribute *attr,
					   const char *buf, size_t count)
{
	struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
	struct zone *zone;
	unsigned long res;
	int err;

	/*
	 * strict_strtoul() returns 0 on success and stores the parsed value
	 * in @res; treat a parse error or a value of zero as a no-op.
	 */
	err = strict_strtoul(buf, 10, &res);
	if (err || !res)
		return 1;	/* zero is no-op */

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;
		scan_zone_unevictable_pages(zone);
	}
	return 1;
}

static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
			read_scan_unevictable_node,
			write_scan_unevictable_node);

int scan_unevictable_register_node(struct node *node)
{
	return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
}

void scan_unevictable_unregister_node(struct node *node)
{
	sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
}