/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list; the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <trace/events/kmem.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
    [N_POSSIBLE] = NODE_MASK_ALL,
    [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
    [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
    [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
    [N_CPU] = { { [0] = 1UL } },
#endif /* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 * 1G machine -> (16M dma, 784M normal, 224M high)
 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
    256,
#endif
#ifdef CONFIG_ZONE_DMA32
    256,
#endif
#ifdef CONFIG_HIGHMEM
    32,
#endif
    32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
    "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
    "DMA32",
#endif
    "Normal",
#ifdef CONFIG_HIGHMEM
    "HighMem",
#endif
    "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * MAX_ACTIVE_REGIONS determines the maximum number of distinct
 * ranges of memory (RAM) that may be registered with add_active_range().
 * Ranges passed to add_active_range() will be merged if possible
 * so the number of times add_active_range() can be called is
 * related to the number of nodes and the number of holes
 */
#ifdef CONFIG_MAX_ACTIVE_REGIONS
/* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
#define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
#else
#if MAX_NUMNODES >= 32
/* If there can be many nodes, allow up to 50 holes per node */
#define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
#else
/* By default, allow up to 256 distinct regions */
#define MAX_ACTIVE_REGIONS 256
#endif
#endif

static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
static int __meminitdata nr_nodemap_entries;
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;
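
/*
 * Tag the pageblock containing @page with a migratetype. When grouping
 * pages by mobility is disabled, every block is treated as unmovable.
 */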
static void set_pageblock_migratetype(struct page *page, int migratetype)
{
    if (unlikely(page_group_by_mobility_disabled))
        migratetype = MIGRATE_UNMOVABLE;

    set_pageblock_flags_group(page, (unsigned long)migratetype,
                    PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
    int ret = 0;
    unsigned seq;
    unsigned long pfn = page_to_pfn(page);

    do {
        seq = zone_span_seqbegin(zone);
        if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
            ret = 1;
        else if (pfn < zone->zone_start_pfn)
            ret = 1;
    } while (zone_span_seqretry(zone, seq));

    return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
    if (!pfn_valid_within(page_to_pfn(page)))
        return 0;
    if (zone != page_zone(page))
        return 0;

    return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
    if (page_outside_zone_boundaries(zone, page))
        return 1;
    if (!page_is_consistent(zone, page))
        return 1;

    return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
    return 0;
}
#endif

static void bad_page(struct page *page)
{
    static unsigned long resume;
    static unsigned long nr_shown;
    static unsigned long nr_unshown;

    /* Don't complain about poisoned pages */
    if (PageHWPoison(page)) {
        __ClearPageBuddy(page);
        return;
    }

    /*
     * Allow a burst of 60 reports, then keep quiet for that minute;
     * or allow a steady drip of one report per second.
     */
    if (nr_shown == 60) {
        if (time_before(jiffies, resume)) {
            nr_unshown++;
            goto out;
        }
        if (nr_unshown) {
            printk(KERN_ALERT
                "BUG: Bad page state: %lu messages suppressed\n",
                nr_unshown);
            nr_unshown = 0;
        }
        nr_shown = 0;
    }
    if (nr_shown++ == 0)
        resume = jiffies + 60 * HZ;

    printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
        current->comm, page_to_pfn(page));
    printk(KERN_ALERT
        "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
        page, (void *)page->flags, page_count(page),
        page_mapcount(page), page->mapping, page->index);

    dump_stack();
out:
    /* Leave bad fields for debug, except PageBuddy could make trouble */
    __ClearPageBuddy(page);
    add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set. All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function. Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
    __free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
    int i;
    int nr_pages = 1 << order;

    set_compound_page_dtor(page, free_compound_page);
    set_compound_order(page, order);
    __SetPageHead(page);
    for (i = 1; i < nr_pages; i++) {
        struct page *p = page + i;

        __SetPageTail(p);
        p->first_page = page;
    }
}

static int destroy_compound_page(struct page *page, unsigned long order)
{
    int i;
    int nr_pages = 1 << order;
    int bad = 0;

    if (unlikely(compound_order(page) != order) ||
        unlikely(!PageHead(page))) {
        bad_page(page);
        bad++;
    }

    __ClearPageHead(page);

    for (i = 1; i < nr_pages; i++) {
        struct page *p = page + i;

        if (unlikely(!PageTail(p) || (p->first_page != page))) {
            bad_page(page);
            bad++;
        }
        __ClearPageTail(p);
    }

    return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
    int i;

    /*
     * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
     * and __GFP_HIGHMEM from hard or soft interrupt context.
     */
    VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
    for (i = 0; i < (1 << order); i++)
        clear_highpage(page + i);
}
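
/*
 * Record a free page's buddy order in page_private() and flag it as
 * being in the buddy system with PG_buddy; rmv_page_order() undoes
 * both when the page leaves the free lists.
 */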
static inline void set_page_order(struct page *page, int order)
{
    set_page_private(page, order);
    __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
    __ClearPageBuddy(page);
    set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
    unsigned long buddy_idx = page_idx ^ (1 << order);

    return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
    return (page_idx & ~(1 << order));
}
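
/*
 * Worked example of the two index helpers above (illustrative only):
 * for page_idx = 12 at order 2, the buddy index is 12 ^ (1 << 2) = 8,
 * and the combined index of the merged order-3 block is
 * 12 & ~(1 << 2) = 8, i.e. the lower of the two buddies.
 */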

/*
 * This function checks whether a page is free && is the buddy;
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                int order)
{
    if (!pfn_valid_within(page_to_pfn(buddy)))
        return 0;

    if (page_zone_id(page) != page_zone_id(buddy))
        return 0;

    if (PageBuddy(buddy) && page_order(buddy) == order) {
        VM_BUG_ON(page_count(buddy) != 0);
        return 1;
    }
    return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */
static inline void __free_one_page(struct page *page,
        struct zone *zone, unsigned int order,
        int migratetype)
{
    unsigned long page_idx;

    if (unlikely(PageCompound(page)))
        if (unlikely(destroy_compound_page(page, order)))
            return;

    VM_BUG_ON(migratetype == -1);

    page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

    VM_BUG_ON(page_idx & ((1 << order) - 1));
    VM_BUG_ON(bad_range(zone, page));

    while (order < MAX_ORDER-1) {
        unsigned long combined_idx;
        struct page *buddy;

        buddy = __page_find_buddy(page, page_idx, order);
        if (!page_is_buddy(page, buddy, order))
            break;

        /* Our buddy is free, merge with it and move up one order. */
        list_del(&buddy->lru);
        zone->free_area[order].nr_free--;
        rmv_page_order(buddy);
        combined_idx = __find_combined_index(page_idx, order);
        page = page + (combined_idx - page_idx);
        page_idx = combined_idx;
        order++;
    }
    set_page_order(page, order);
    list_add(&page->lru,
        &zone->free_area[order].free_list[migratetype]);
    zone->free_area[order].nr_free++;
}

/*
 * free_page_mlock() -- clean up attempts to free an mlocked() page.
 * Page should not be on lru, so no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
    __dec_zone_page_state(page, NR_MLOCK);
    __count_vm_event(UNEVICTABLE_MLOCKFREED);
}
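
/*
 * Sanity-check a page that is about to be freed: it must be unmapped,
 * have no mapping, a zero reference count and no "bad" flags set.
 * Returns 1 (after reporting via bad_page()) if the page is corrupt.
 */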
static inline int free_pages_check(struct page *page)
{
    if (unlikely(page_mapcount(page) |
        (page->mapping != NULL) |
        (atomic_read(&page->_count) != 0) |
        (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
        bad_page(page);
        return 1;
    }
    if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
        page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
    return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
                    struct per_cpu_pages *pcp)
{
    int migratetype = 0;
    int batch_free = 0;

    spin_lock(&zone->lock);
    zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
    zone->pages_scanned = 0;

    __mod_zone_page_state(zone, NR_FREE_PAGES, count);
    while (count) {
        struct page *page;
        struct list_head *list;

        /*
         * Remove pages from lists in a round-robin fashion. A
         * batch_free count is maintained that is incremented when an
         * empty list is encountered. This is so more pages are freed
         * off fuller lists instead of spinning excessively around empty
         * lists
         */
        do {
            batch_free++;
            if (++migratetype == MIGRATE_PCPTYPES)
                migratetype = 0;
            list = &pcp->lists[migratetype];
        } while (list_empty(list));

        do {
            page = list_entry(list->prev, struct page, lru);
            /* must delete as __free_one_page list manipulates */
            list_del(&page->lru);
            __free_one_page(page, zone, 0, migratetype);
            trace_mm_page_pcpu_drain(page, 0, migratetype);
        } while (--count && --batch_free && !list_empty(list));
    }
    spin_unlock(&zone->lock);
}
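
/*
 * Free a single page of the given order straight back to the buddy
 * lists, taking zone->lock. The caller is expected to have interrupts
 * disabled (see __free_pages_ok()).
 */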
static void free_one_page(struct zone *zone, struct page *page, int order,
                int migratetype)
{
    spin_lock(&zone->lock);
    zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
    zone->pages_scanned = 0;

    __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
    __free_one_page(page, zone, order, migratetype);
    spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
    unsigned long flags;
    int i;
    int bad = 0;
    int wasMlocked = __TestClearPageMlocked(page);

    kmemcheck_free_shadow(page, order);

    for (i = 0; i < (1 << order); ++i)
        bad += free_pages_check(page + i);
    if (bad)
        return;

    if (!PageHighMem(page)) {
        debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
        debug_check_no_obj_freed(page_address(page),
                        PAGE_SIZE << order);
    }
    arch_free_page(page, order);
    kernel_map_pages(page, 1 << order, 0);

    local_irq_save(flags);
    if (unlikely(wasMlocked))
        free_page_mlock(page);
    __count_vm_events(PGFREE, 1 << order);
    free_one_page(page_zone(page), page, order,
                    get_pageblock_migratetype(page));
    local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
    if (order == 0) {
        __ClearPageReserved(page);
        set_page_count(page, 0);
        set_page_refcounted(page);
        __free_page(page);
    } else {
        int loop;

        prefetchw(page);
        for (loop = 0; loop < BITS_PER_LONG; loop++) {
            struct page *p = &page[loop];

            if (loop + 1 < BITS_PER_LONG)
                prefetchw(p + 1);
            __ClearPageReserved(p);
            set_page_count(p, 0);
        }

        set_page_refcounted(page);
        __free_pages(page, order);
    }
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
    int low, int high, struct free_area *area,
    int migratetype)
{
    unsigned long size = 1 << high;

    while (high > low) {
        area--;
        high--;
        size >>= 1;
        VM_BUG_ON(bad_range(zone, &page[size]));
        list_add(&page[size].lru, &area->free_list[migratetype]);
        area->nr_free++;
        set_page_order(&page[size], high);
    }
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
    if (unlikely(page_mapcount(page) |
        (page->mapping != NULL) |
        (atomic_read(&page->_count) != 0) |
        (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
        bad_page(page);
        return 1;
    }
    return 0;
}
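
/*
 * Prepare a freshly allocated block for the caller: re-check every
 * constituent page, reset the refcount, and honour __GFP_ZERO and
 * __GFP_COMP. Returns 1 if a bad page was found, so the caller can
 * retry with a different block.
 */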
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
    int i;

    for (i = 0; i < (1 << order); i++) {
        struct page *p = page + i;
        if (unlikely(check_new_page(p)))
            return 1;
    }

    set_page_private(page, 0);
    set_page_refcounted(page);

    arch_alloc_page(page, order);
    kernel_map_pages(page, 1 << order, 1);

    if (gfp_flags & __GFP_ZERO)
        prep_zero_page(page, order, gfp_flags);

    if (order && (gfp_flags & __GFP_COMP))
        prep_compound_page(page, order);

    return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                        int migratetype)
{
    unsigned int current_order;
    struct free_area *area;
    struct page *page;

    /* Find a page of the appropriate size in the preferred list */
    for (current_order = order; current_order < MAX_ORDER; ++current_order) {
        area = &(zone->free_area[current_order]);
        if (list_empty(&area->free_list[migratetype]))
            continue;

        page = list_entry(area->free_list[migratetype].next,
                            struct page, lru);
        list_del(&page->lru);
        rmv_page_order(page);
        area->nr_free--;
        expand(zone, page, order, current_order, area, migratetype);
        return page;
    }

    return NULL;
}

/*
 * This array describes the order in which lists are fallen back to
 * when the free lists for the desired migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
    [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
    [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
    [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
    [MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
              struct page *start_page, struct page *end_page,
              int migratetype)
{
    struct page *page;
    unsigned long order;
    int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
    /*
     * page_zone is not safe to call in this context when
     * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
     * anyway as we check zone boundaries in move_freepages_block().
     * Remove at a later date when no bug reports exist related to
     * grouping pages by mobility
     */
    BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

    for (page = start_page; page <= end_page;) {
        /* Make sure we are not inadvertently changing nodes */
        VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

        if (!pfn_valid_within(page_to_pfn(page))) {
            page++;
            continue;
        }

        if (!PageBuddy(page)) {
            page++;
            continue;
        }

        order = page_order(page);
        list_del(&page->lru);
        list_add(&page->lru,
            &zone->free_area[order].free_list[migratetype]);
        page += 1 << order;
        pages_moved += 1 << order;
    }

    return pages_moved;
}
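
/*
 * Helper for the above: round @page down to the start of its pageblock
 * and move every free page in that block to @migratetype's free list.
 * Returns 0 without moving anything if the block runs past the end of
 * the zone; otherwise returns the number of pages moved.
 */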
static int move_freepages_block(struct zone *zone, struct page *page,
                int migratetype)
{
    unsigned long start_pfn, end_pfn;
    struct page *start_page, *end_page;

    start_pfn = page_to_pfn(page);
    start_pfn = start_pfn & ~(pageblock_nr_pages-1);
    start_page = pfn_to_page(start_pfn);
    end_page = start_page + pageblock_nr_pages - 1;
    end_pfn = start_pfn + pageblock_nr_pages - 1;

    /* Do not cross zone boundaries */
    if (start_pfn < zone->zone_start_pfn)
        start_page = page;
    if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
        return 0;

    return move_freepages(zone, start_page, end_page, migratetype);
}
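
/*
 * When an allocation of order >= pageblock_order changes ownership,
 * retag every pageblock it spans, not just the first one.
 */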
static void change_pageblock_range(struct page *pageblock_page,
                    int start_order, int migratetype)
{
    int nr_pageblocks = 1 << (start_order - pageblock_order);

    while (nr_pageblocks--) {
        set_pageblock_migratetype(pageblock_page, migratetype);
        pageblock_page += pageblock_nr_pages;
    }
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
    struct free_area *area;
    int current_order;
    struct page *page;
    int migratetype, i;

    /* Find the largest possible block of pages in the other list */
    for (current_order = MAX_ORDER-1; current_order >= order;
                        --current_order) {
        for (i = 0; i < MIGRATE_TYPES - 1; i++) {
            migratetype = fallbacks[start_migratetype][i];

            /* MIGRATE_RESERVE handled later if necessary */
            if (migratetype == MIGRATE_RESERVE)
                continue;

            area = &(zone->free_area[current_order]);
            if (list_empty(&area->free_list[migratetype]))
                continue;

            page = list_entry(area->free_list[migratetype].next,
                    struct page, lru);
            area->nr_free--;

            /*
             * If breaking a large block of pages, move all free
             * pages to the preferred allocation list. If falling
             * back for a reclaimable kernel allocation, be more
             * aggressive about taking ownership of free pages
             */
            if (unlikely(current_order >= (pageblock_order >> 1)) ||
                    start_migratetype == MIGRATE_RECLAIMABLE ||
                    page_group_by_mobility_disabled) {
                unsigned long pages;
                pages = move_freepages_block(zone, page,
                                start_migratetype);

                /* Claim the whole block if over half of it is free */
                if (pages >= (1 << (pageblock_order-1)) ||
                        page_group_by_mobility_disabled)
                    set_pageblock_migratetype(page,
                                start_migratetype);

                migratetype = start_migratetype;
            }

            /* Remove the page from the freelists */
            list_del(&page->lru);
            rmv_page_order(page);

            /* Take ownership for orders >= pageblock_order */
            if (current_order >= pageblock_order)
                change_pageblock_range(page, current_order,
                            start_migratetype);

            expand(zone, page, order, current_order, area, migratetype);

            trace_mm_page_alloc_extfrag(page, order, current_order,
                start_migratetype, migratetype);

            return page;
        }
    }

    return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
                        int migratetype)
{
    struct page *page;

retry_reserve:
    page = __rmqueue_smallest(zone, order, migratetype);

    if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
        page = __rmqueue_fallback(zone, order, migratetype);

        /*
         * Use MIGRATE_RESERVE rather than fail an allocation. goto
         * is used because __rmqueue_smallest is an inline function
         * and we want just one call site
         */
        if (!page) {
            migratetype = MIGRATE_RESERVE;
            goto retry_reserve;
        }
    }

    trace_mm_page_alloc_zone_locked(page, order, migratetype);
    return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency. Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
            unsigned long count, struct list_head *list,
            int migratetype, int cold)
{
    int i;

    spin_lock(&zone->lock);
    for (i = 0; i < count; ++i) {
        struct page *page = __rmqueue(zone, order, migratetype);
        if (unlikely(page == NULL))
            break;

        /*
         * Split buddy pages returned by expand() are received here
         * in physical page order. The page is added to the caller's
         * list and the list head then moves forward. From the caller's
         * perspective, the linked list is ordered by page number in
         * some conditions. This is useful for IO devices that can
         * merge IO requests if the physical pages are ordered
         * properly.
         */
        if (likely(cold == 0))
            list_add(&page->lru, list);
        else
            list_add_tail(&page->lru, list);
        set_page_private(page, migratetype);
        list = &page->lru;
    }
    __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
    spin_unlock(&zone->lock);
    return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
    unsigned long flags;
    int to_drain;

    local_irq_save(flags);
    if (pcp->count >= pcp->batch)
        to_drain = pcp->batch;
    else
        to_drain = pcp->count;
    free_pcppages_bulk(zone, to_drain, pcp);
    pcp->count -= to_drain;
    local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
    unsigned long flags;
    struct zone *zone;

    for_each_populated_zone(zone) {
        struct per_cpu_pageset *pset;
        struct per_cpu_pages *pcp;

        pset = zone_pcp(zone, cpu);
        pcp = &pset->pcp;

        local_irq_save(flags);
        free_pcppages_bulk(zone, pcp->count, pcp);
        pcp->count = 0;
        local_irq_restore(flags);
    }
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
    drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
    on_each_cpu(drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION
void mark_free_pages(struct zone *zone)
{
    unsigned long pfn, max_zone_pfn;
    unsigned long flags;
    int order, t;
    struct list_head *curr;

    if (!zone->spanned_pages)
        return;

    spin_lock_irqsave(&zone->lock, flags);

    max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
    for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
        if (pfn_valid(pfn)) {
            struct page *page = pfn_to_page(pfn);

            if (!swsusp_page_is_forbidden(page))
                swsusp_unset_page_free(page);
        }

    for_each_migratetype_order(order, t) {
        list_for_each(curr, &zone->free_area[order].free_list[t]) {
            unsigned long i;

            pfn = page_to_pfn(list_entry(curr, struct page, lru));
            for (i = 0; i < (1UL << order); i++)
                swsusp_set_page_free(pfn_to_page(pfn + i));
        }
    }
    spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 */
static void free_hot_cold_page(struct page *page, int cold)
{
    struct zone *zone = page_zone(page);
    struct per_cpu_pages *pcp;
    unsigned long flags;
    int migratetype;
    int wasMlocked = __TestClearPageMlocked(page);

    kmemcheck_free_shadow(page, 0);

    if (PageAnon(page))
        page->mapping = NULL;
    if (free_pages_check(page))
        return;

    if (!PageHighMem(page)) {
        debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
        debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
    }
    arch_free_page(page, 0);
    kernel_map_pages(page, 1, 0);

    pcp = &zone_pcp(zone, get_cpu())->pcp;
    migratetype = get_pageblock_migratetype(page);
    set_page_private(page, migratetype);
    local_irq_save(flags);
    if (unlikely(wasMlocked))
        free_page_mlock(page);
    __count_vm_event(PGFREE);

    /*
     * We only track unmovable, reclaimable and movable on pcp lists.
     * Free ISOLATE pages back to the allocator because they are being
     * offlined but treat RESERVE as movable pages so we can get those
     * areas back if necessary. Otherwise, we may have to free
     * excessively into the page allocator
     */
    if (migratetype >= MIGRATE_PCPTYPES) {
        if (unlikely(migratetype == MIGRATE_ISOLATE)) {
            free_one_page(zone, page, 0, migratetype);
            goto out;
        }
        migratetype = MIGRATE_MOVABLE;
    }

    if (cold)
        list_add_tail(&page->lru, &pcp->lists[migratetype]);
    else
        list_add(&page->lru, &pcp->lists[migratetype]);
    pcp->count++;
    if (pcp->count >= pcp->high) {
        free_pcppages_bulk(zone, pcp->batch, pcp);
        pcp->count -= pcp->batch;
    }

out:
    local_irq_restore(flags);
    put_cpu();
}
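
/*
 * Convenience wrapper: trace the free and put a single 0-order page
 * back on the hot end of the current CPU's pcp list.
 */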
  979. void free_hot_page(struct page *page)
  980. {
  981. trace_mm_page_free_direct(page, 0);
  982. free_hot_cold_page(page, 0);
  983. }
  984. /*
  985. * split_page takes a non-compound higher-order page, and splits it into
  986. * n (1<<order) sub-pages: page[0..n]
  987. * Each sub-page must be freed individually.
  988. *
  989. * Note: this is probably too low level an operation for use in drivers.
  990. * Please consult with lkml before using this in your driver.
  991. */
  992. void split_page(struct page *page, unsigned int order)
  993. {
  994. int i;
  995. VM_BUG_ON(PageCompound(page));
  996. VM_BUG_ON(!page_count(page));
  997. #ifdef CONFIG_KMEMCHECK
  998. /*
  999. * Split shadow pages too, because free(page[0]) would
  1000. * otherwise free the whole shadow.
  1001. */
  1002. if (kmemcheck_page_is_tracked(page))
  1003. split_page(virt_to_page(page[0].shadow), order);
  1004. #endif
  1005. for (i = 1; i < (1 << order); i++)
  1006. set_page_refcounted(page + i);
  1007. }
/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
 * we cheat by calling it from here, in the order > 0 path. Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, int order, gfp_t gfp_flags,
			int migratetype)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;
		struct list_head *list;

		pcp = &zone_pcp(zone, cpu)->pcp;
		list = &pcp->lists[migratetype];
		local_irq_save(flags);
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, cold);
			if (unlikely(list_empty(list)))
				goto failed;
		}

		if (cold)
			page = list_entry(list->prev, struct page, lru);
		else
			page = list_entry(list->next, struct page, lru);

		list_del(&page->lru);
		pcp->count--;
	} else {
		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
			/*
			 * __GFP_NOFAIL is not to be used in new code.
			 *
			 * All __GFP_NOFAIL callers should be fixed so that they
			 * properly detect and handle allocation failures.
			 *
			 * We most definitely don't want callers attempting to
			 * allocate greater than order-1 page units with
			 * __GFP_NOFAIL.
			 */
			WARN_ON_ONCE(order > 1);
		}
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct dentry *ignore_gfp_highmem_file;
	struct dentry *ignore_gfp_wait_file;
	struct dentry *min_order_file;
#endif	/* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);
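
/*
 * Illustrative note (an assumption, not upstream documentation):
 * setup_fault_attr() parses the common fault-injection format
 * "<interval>,<probability>,<space>,<times>", so booting with e.g.
 *
 *	fail_page_alloc=1,10,0,-1
 *
 * would, under that assumption, fail a fraction of eligible allocations
 * with no byte-space filter and no limit on the number of failures. See
 * Documentation/fault-injection/ for the authoritative syntax.
 */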
static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return 0;
	if (gfp_mask & __GFP_NOFAIL)
		return 0;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return 0;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return 0;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
				       "fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				    &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				    &fail_page_alloc.ignore_gfp_highmem);
	fail_page_alloc.min_order_file =
		debugfs_create_u32("min-order", mode, dir,
				   &fail_page_alloc.min_order);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
	    !fail_page_alloc.ignore_gfp_highmem_file ||
	    !fail_page_alloc.min_order_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		debugfs_remove(fail_page_alloc.min_order_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif	/* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else	/* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif	/* CONFIG_FAIL_PAGE_ALLOC */
/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
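
/*
 * Worked example (illustrative numbers, not measured from a real
 * system): suppose an order-2 request against min = 128 with no
 * ALLOC_HIGH/ALLOC_HARDER, lowmem_reserve = 0, and 260 free pages, of
 * which 120 sit in order-0 blocks and 20 in order-1 blocks:
 *
 *	initial: free_pages = 260 - 4 + 1 = 257 > 128        -> continue
 *	o = 0:   free_pages = 257 - 120  = 137, min = 64     -> continue
 *	o = 1:   free_pages = 137 - 40   = 97,  min = 32     -> continue
 *
 * All checks pass, so the watermark is considered met: enough of the
 * free memory sits in blocks of at least the requested order.
 */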
#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	nodemask_t *allowednodes;	/* zonelist_cache approximation */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return NULL;

	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}

	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
					&cpuset_current_mems_allowed :
					&node_states[N_HIGH_MEMORY];
	return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 *
 * 1) Check that the zone isn't thought to be full (doesn't have its
 *    bit set in the zonelist_cache fullzones BITMAP).
 * 2) Check that the zone's node (obtained from the zonelist_cache
 *    z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 *
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->_zonerefs;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	i = z - zonelist->_zonerefs;

	set_bit(i, zlc->fullzones);
}

#else	/* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
				nodemask_t *allowednodes)
{
	return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}
#endif	/* CONFIG_NUMA */
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
		struct zone *preferred_zone, int migratetype)
{
	struct zoneref *z;
	struct page *page = NULL;
	int classzone_idx;
	struct zone *zone;
	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */

	classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						high_zoneidx, nodemask) {
		if (NUMA_BUILD && zlc_active &&
			!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;
		if ((alloc_flags & ALLOC_CPUSET) &&
			!cpuset_zone_allowed_softwall(zone, gfp_mask))
				goto try_next_zone;

		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			int ret;

			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
			if (zone_watermark_ok(zone, order, mark,
				    classzone_idx, alloc_flags))
				goto try_this_zone;

			if (zone_reclaim_mode == 0)
				goto this_zone_full;

			ret = zone_reclaim(zone, gfp_mask, order);
			switch (ret) {
			case ZONE_RECLAIM_NOSCAN:
				/* did not scan */
				goto try_next_zone;
			case ZONE_RECLAIM_FULL:
				/* scanned but unreclaimable */
				goto this_zone_full;
			default:
				/* did we reclaim enough */
				if (!zone_watermark_ok(zone, order, mark,
						classzone_idx, alloc_flags))
					goto this_zone_full;
			}
		}

try_this_zone:
		page = buffered_rmqueue(preferred_zone, zone, order,
						gfp_mask, migratetype);
		if (page)
			break;
this_zone_full:
		if (NUMA_BUILD)
			zlc_mark_zone_full(zonelist, z);
try_next_zone:
		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
			/*
			 * We do zlc_setup after the first zone is tried,
			 * but only if there are multiple nodes, which makes
			 * it worthwhile.
			 */
			allowednodes = zlc_setup(zonelist, alloc_flags);
			zlc_active = 1;
			did_zlc_setup = 1;
		}
	}

	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
		/* Disable zlc cache for second zonelist scan */
		zlc_active = 0;
		goto zonelist_scan;
	}
	return page;
}
static inline int
should_alloc_retry(gfp_t gfp_mask, unsigned int order,
				unsigned long pages_reclaimed)
{
	/* Do not loop if specifically requested */
	if (gfp_mask & __GFP_NORETRY)
		return 0;

	/*
	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
	 * means __GFP_NOFAIL, but that may not be true in other
	 * implementations.
	 */
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return 1;

	/*
	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
	 * specified, then we retry until we no longer reclaim any pages
	 * (above), or we've reclaimed an order of pages at least as
	 * large as the allocation's order. In both cases, if the
	 * allocation still fails, we stop retrying.
	 */
	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
		return 1;

	/*
	 * Don't let big-order allocations loop unless the caller
	 * explicitly requests that.
	 */
	if (gfp_mask & __GFP_NOFAIL)
		return 1;

	return 0;
}
static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	struct page *page;

	/* Acquire the OOM killer lock for the zones in zonelist */
	if (!try_set_zone_oom(zonelist, gfp_mask)) {
		schedule_timeout_uninterruptible(1);
		return NULL;
	}

	/*
	 * Go through the zonelist yet one more time, keep very high watermark
	 * here, this is only to catch a parallel oom killing, we must fail if
	 * we're still under heavy pressure.
	 */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
		order, zonelist, high_zoneidx,
		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
		preferred_zone, migratetype);
	if (page)
		goto out;

	if (!(gfp_mask & __GFP_NOFAIL)) {
		/* The OOM killer will not help higher order allocs */
		if (order > PAGE_ALLOC_COSTLY_ORDER)
			goto out;
		/*
		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
		 * The caller should handle page allocation failure by itself if
		 * it specifies __GFP_THISNODE.
		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
		 */
		if (gfp_mask & __GFP_THISNODE)
			goto out;
	}
	/* Exhausted what can be done so it's blamo time */
	out_of_memory(zonelist, gfp_mask, order, nodemask);

out:
	clear_zonelist_oom(zonelist, gfp_mask);
	return page;
}
/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int migratetype, unsigned long *did_some_progress)
{
	struct page *page = NULL;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	p->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);

	p->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (order != 0)
		drain_all_pages();

	if (likely(*did_some_progress))
		page = get_page_from_freelist(gfp_mask, nodemask, order,
					zonelist, high_zoneidx,
					alloc_flags, preferred_zone,
					migratetype);
	return page;
}

/*
 * This is called in the allocator slow-path if the allocation request is of
 * sufficient urgency to ignore watermarks and take other desperate measures.
 */
static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	struct page *page;

	do {
		page = get_page_from_freelist(gfp_mask, nodemask, order,
			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
			preferred_zone, migratetype);

		if (!page && gfp_mask & __GFP_NOFAIL)
			congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (!page && (gfp_mask & __GFP_NOFAIL));

	return page;
}
static inline
void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
						enum zone_type high_zoneidx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		wakeup_kswapd(zone, order);
}

static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	struct task_struct *p = current;
	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
	const gfp_t wait = gfp_mask & __GFP_WAIT;

	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
	BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (gfp_mask & __GFP_HIGH);

	if (!wait) {
		alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(p)) && !in_interrupt())
		alloc_flags |= ALLOC_HARDER;

	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
		if (!in_interrupt() &&
		    ((p->flags & PF_MEMALLOC) ||
		     unlikely(test_thread_flag(TIF_MEMDIE))))
			alloc_flags |= ALLOC_NO_WATERMARKS;
	}

	return alloc_flags;
}
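
/*
 * Worked example (illustrative, derived from the logic above): for a
 * GFP_ATOMIC request (__GFP_HIGH set, __GFP_WAIT clear) issued from a
 * normal task context, gfp_to_alloc_flags() yields
 *
 *	ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER
 *
 * with ALLOC_CPUSET cleared, i.e. the request checks the min watermark,
 * may dip further into reserves, and ignores cpuset restrictions rather
 * than fail. A plain GFP_KERNEL request keeps the default
 * ALLOC_WMARK_MIN | ALLOC_CPUSET.
 */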
static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct page *page = NULL;
	int alloc_flags;
	unsigned long pages_reclaimed = 0;
	unsigned long did_some_progress;
	struct task_struct *p = current;

	/*
	 * In the slowpath, we sanity check order to avoid ever trying to
	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
	 * be using allocators in order of preference for an area that is
	 * too large.
	 */
	if (order >= MAX_ORDER) {
		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
	 * using a larger set of nodes after it has established that the
	 * allowed per node queues are empty and that nodes are
	 * over allocated.
	 */
	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
		goto nopage;

restart:
	wake_all_kswapd(order, zonelist, high_zoneidx);

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 */
	alloc_flags = gfp_to_alloc_flags(gfp_mask);

	/* This is the last chance, in general, before the goto nopage. */
	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
			preferred_zone, migratetype);
	if (page)
		goto got_pg;

rebalance:
	/* Allocate without watermarks if the context allows */
	if (alloc_flags & ALLOC_NO_WATERMARKS) {
		page = __alloc_pages_high_priority(gfp_mask, order,
				zonelist, high_zoneidx, nodemask,
				preferred_zone, migratetype);
		if (page)
			goto got_pg;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

	/* Avoid recursion of direct reclaim */
	if (p->flags & PF_MEMALLOC)
		goto nopage;

	/* Avoid allocations with no watermarks from looping endlessly */
	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
		goto nopage;

	/* Try direct reclaim and then allocating */
	page = __alloc_pages_direct_reclaim(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask,
					alloc_flags, preferred_zone,
					migratetype, &did_some_progress);
	if (page)
		goto got_pg;

	/*
	 * If we failed to make any progress reclaiming, then we are
	 * running out of options and have to consider going OOM.
	 */
	if (!did_some_progress) {
		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
			if (oom_killer_disabled)
				goto nopage;
			page = __alloc_pages_may_oom(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask, preferred_zone,
					migratetype);
			if (page)
				goto got_pg;

			/*
			 * The OOM killer does not trigger for high-order
			 * ~__GFP_NOFAIL allocations so if no progress is being
			 * made, there are no other options and retrying is
			 * unlikely to help.
			 */
			if (order > PAGE_ALLOC_COSTLY_ORDER &&
						!(gfp_mask & __GFP_NOFAIL))
				goto nopage;

			goto restart;
		}
	}

	/* Check if we should retry the allocation */
	pages_reclaimed += did_some_progress;
	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
		/* Wait for some write requests to complete then retry */
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
	return page;
got_pg:
	if (kmemcheck_enabled)
		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
	return page;
}
/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
			struct zonelist *zonelist, nodemask_t *nodemask)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	struct zone *preferred_zone;
	struct page *page;
	int migratetype = allocflags_to_migratetype(gfp_mask);

	gfp_mask &= gfp_allowed_mask;

	lockdep_trace_alloc(gfp_mask);

	might_sleep_if(gfp_mask & __GFP_WAIT);

	if (should_fail_alloc_page(gfp_mask, order))
		return NULL;

	/*
	 * Check the zones suitable for the gfp_mask contain at least one
	 * valid zone. It's possible to have an empty zonelist as a result
	 * of GFP_THISNODE and a memoryless node.
	 */
	if (unlikely(!zonelist->_zonerefs->zone))
		return NULL;

	/* The preferred zone is used for statistics later */
	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
	if (!preferred_zone)
		return NULL;

	/* First allocation attempt */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
			preferred_zone, migratetype);
	if (unlikely(!page))
		page = __alloc_pages_slowpath(gfp_mask, order,
				zonelist, high_zoneidx, nodemask,
				preferred_zone, migratetype);

	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
	return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);
/*
 * Common helper functions.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	/*
	 * __get_free_pages() returns a 32-bit address, which cannot represent
	 * a highmem page.
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0) {
		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
		free_hot_cold_page(pvec->pages[i], pvec->cold);
	}
}

void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		trace_mm_page_free_direct(page, order);
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}
EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}
EXPORT_SYMBOL(free_pages);
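
/*
 * Illustrative sketch (not part of the allocator): the usual pairing of
 * these helpers from a caller's point of view. The function name
 * example_helper_usage() is hypothetical.
 */
#if 0	/* example only */
static int example_helper_usage(void)
{
	unsigned long buf, zeroed;

	buf = __get_free_pages(GFP_KERNEL, 1);	/* two contiguous pages */
	zeroed = get_zeroed_page(GFP_KERNEL);	/* one pre-zeroed page */
	if (!buf || !zeroed)
		goto fail;

	/* ... use the buffers ... */

	free_pages(buf, 1);	/* order must match the allocation */
	free_page(zeroed);
	return 0;
fail:
	if (buf)
		free_pages(buf, 1);
	if (zeroed)
		free_page(zeroed);
	return -ENOMEM;
}
#endif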
/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);
	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page((void *)addr), order);

		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}

	return (void *)addr;
}
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long addr = (unsigned long)virt;
	unsigned long end = addr + PAGE_ALIGN(size);

	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(free_pages_exact);
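
/*
 * Worked example (illustrative): with 4K pages, a request of
 * size = 5 * PAGE_SIZE rounds up to order 3 (8 pages) inside
 * alloc_pages_exact(). The order-3 block is split into 8 independent
 * pages and the trailing 3 are freed immediately, so only 5 pages stay
 * allocated; free_pages_exact(p, 5 * PAGE_SIZE) later releases exactly
 * those 5. The function name example_exact_usage() is hypothetical.
 */
#if 0	/* example only */
static void example_exact_usage(void)
{
	void *p = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);

	if (!p)
		return;
	/* ... use the 5-page buffer ... */
	free_pages_exact(p, 5 * PAGE_SIZE);
}
#endif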
static unsigned int nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;

	/* Just pick one node, since fallback list is circular */
	unsigned int sum = 0;

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

	for_each_zone_zonelist(zone, z, zonelist, offset) {
		unsigned long size = zone->present_pages;
		unsigned long high = high_wmark_pages(zone);
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);

/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
}

static inline void show_node(struct zone *zone)
{
	if (NUMA_BUILD)
		printk("Node %d ", zone_to_nid(zone));
}

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = global_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}
EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	val->totalram = pgdat->node_present_pages;
	val->freeram = node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
			NR_FREE_PAGES);
#else
	val->totalhigh = 0;
	val->freehigh = 0;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif
#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
void show_free_areas(void)
{
	int cpu;
	struct zone *zone;

	for_each_populated_zone(zone) {
		show_node(zone);
		printk("%s per-cpu:\n", zone->name);

		for_each_online_cpu(cpu) {
			struct per_cpu_pageset *pageset;

			pageset = zone_pcp(zone, cpu);

			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
			       cpu, pageset->pcp.high,
			       pageset->pcp.batch, pageset->pcp.count);
		}
	}

	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu"
		" dirty:%lu writeback:%lu unstable:%lu\n"
		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
		global_page_state(NR_ACTIVE_ANON),
		global_page_state(NR_INACTIVE_ANON),
		global_page_state(NR_ISOLATED_ANON),
		global_page_state(NR_ACTIVE_FILE),
		global_page_state(NR_INACTIVE_FILE),
		global_page_state(NR_ISOLATED_FILE),
		global_page_state(NR_UNEVICTABLE),
		global_page_state(NR_FILE_DIRTY),
		global_page_state(NR_WRITEBACK),
		global_page_state(NR_UNSTABLE_NFS),
		global_page_state(NR_FREE_PAGES),
		global_page_state(NR_SLAB_RECLAIMABLE),
		global_page_state(NR_SLAB_UNRECLAIMABLE),
		global_page_state(NR_FILE_MAPPED),
		global_page_state(NR_SHMEM),
		global_page_state(NR_PAGETABLE),
		global_page_state(NR_BOUNCE));

	for_each_populated_zone(zone) {
		int i;

		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" present:%lukB"
			" mlocked:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" mapped:%lukB"
			" shmem:%lukB"
			" slab_reclaimable:%lukB"
			" slab_unreclaimable:%lukB"
			" kernel_stack:%lukB"
			" pagetables:%lukB"
			" unstable:%lukB"
			" bounce:%lukB"
			" writeback_tmp:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone_page_state(zone, NR_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ISOLATED_ANON)),
			K(zone_page_state(zone, NR_ISOLATED_FILE)),
			K(zone->present_pages),
			K(zone_page_state(zone, NR_MLOCK)),
			K(zone_page_state(zone, NR_FILE_DIRTY)),
			K(zone_page_state(zone, NR_WRITEBACK)),
			K(zone_page_state(zone, NR_FILE_MAPPED)),
			K(zone_page_state(zone, NR_SHMEM)),
			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
			zone_page_state(zone, NR_KERNEL_STACK) *
				THREAD_SIZE / 1024,
			K(zone_page_state(zone, NR_PAGETABLE)),
			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
			K(zone_page_state(zone, NR_BOUNCE)),
			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
			zone->pages_scanned,
			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_populated_zone(zone) {
		unsigned long nr[MAX_ORDER], flags, order, total = 0;

		show_node(zone);
		printk("%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			nr[order] = zone->free_area[order].nr_free;
			total += nr[order] << order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++)
			printk("%lu*%lukB ", nr[order], K(1UL) << order);
		printk("= %lukB\n", K(total));
	}

	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
	zoneref->zone = zone;
	zoneref->zone_idx = zone_idx(zone);
}

/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
				int nr_zones, enum zone_type zone_type)
{
	struct zone *zone;

	BUG_ON(zone_type >= MAX_NR_ZONES);
	zone_type++;

	do {
		zone_type--;
		zone = pgdat->node_zones + zone_type;
		if (populated_zone(zone)) {
			zoneref_set_zone(zone,
				&zonelist->_zonerefs[nr_zones++]);
			check_highest_zone(zone_type);
		}

	} while (zone_type);
	return nr_zones;
}
/*
 * zonelist_order:
 * 0 = automatic detection of better ordering.
 * 1 = order by ([node] distance, -zonetype)
 * 2 = order by (-zonetype, [node] distance)
 *
 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
 * the same zonelist. So only NUMA can configure this param.
 */
#define ZONELIST_ORDER_DEFAULT	0
#define ZONELIST_ORDER_NODE	1
#define ZONELIST_ORDER_ZONE	2

/* zonelist order in the kernel.
 * set_zonelist_order() will set this to NODE or ZONE.
 */
static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};

#ifdef CONFIG_NUMA
/* The value user specified ....changed by config */
static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
/* string for sysctl */
#define NUMA_ZONELIST_ORDER_LEN	16
char numa_zonelist_order[16] = "default";

/*
 * Interface to configure zonelist ordering.
 * Command line option "numa_zonelist_order"
 *	= "[dD]efault"	- default, automatic configuration.
 *	= "[nN]ode"	- order by node locality, then by zone within node
 *	= "[zZ]one"	- order by zone, then by locality within zone
 */
static int __parse_numa_zonelist_order(char *s)
{
	if (*s == 'd' || *s == 'D') {
		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
	} else if (*s == 'n' || *s == 'N') {
		user_zonelist_order = ZONELIST_ORDER_NODE;
	} else if (*s == 'z' || *s == 'Z') {
		user_zonelist_order = ZONELIST_ORDER_ZONE;
	} else {
		printk(KERN_WARNING
			"Ignoring invalid numa_zonelist_order value: "
			"%s\n", s);
		return -EINVAL;
	}
	return 0;
}

static __init int setup_numa_zonelist_order(char *s)
{
	if (s)
		return __parse_numa_zonelist_order(s);
	return 0;
}
early_param("numa_zonelist_order", setup_numa_zonelist_order);
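
/*
 * Illustrative usage (consistent with the parser above, which only
 * inspects the first character): booting with "numa_zonelist_order=z"
 * or "numa_zonelist_order=Zone" forces zone ordering, while
 * "numa_zonelist_order=n" forces node ordering. The same strings can be
 * written to the numa_zonelist_order sysctl at runtime via the handler
 * below.
 */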
/*
 * sysctl handler for numa_zonelist_order
 */
int numa_zonelist_order_handler(ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos)
{
	char saved_string[NUMA_ZONELIST_ORDER_LEN];
	int ret;

	if (write)
		strncpy(saved_string, (char*)table->data,
			NUMA_ZONELIST_ORDER_LEN);
	ret = proc_dostring(table, write, buffer, length, ppos);
	if (ret)
		return ret;
	if (write) {
		int oldval = user_zonelist_order;
		if (__parse_numa_zonelist_order((char*)table->data)) {
			/*
			 * bogus value.  restore saved string
			 */
			strncpy((char*)table->data, saved_string,
				NUMA_ZONELIST_ORDER_LEN);
			user_zonelist_order = oldval;
		} else if (oldval != user_zonelist_order)
			build_all_zonelists();
	}
	return 0;
}
#define MAX_NODE_LOAD (nr_online_nodes)
static int node_load[MAX_NUMNODES];

/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list.  The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 * It returns -1 if no node is found.
 */
static int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;
	const struct cpumask *tmp = cpumask_of_node(0);

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
		node_set(node, *used_node_mask);
		return node;
	}

	for_each_node_state(n, N_HIGH_MEMORY) {

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Penalize nodes under us ("prefer the next node") */
		val += (n < node);

		/* Give preference to headless and unused nodes */
		tmp = cpumask_of_node(n);
		if (!cpumask_empty(tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}
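
/*
 * Worked example (hypothetical numbers): scoring candidates for node 0
 * on a system where PENALTY_FOR_NODE_WITH_CPUS = 1,
 * MAX_NODE_LOAD * MAX_NUMNODES = 8 and node_load[] is all zero:
 *
 *	node 1, distance 20, has CPUs:  val = (20 + 0 + 1) * 8 = 168
 *	node 2, distance 20, headless:  val = (20 + 0 + 0) * 8 = 160
 *
 * The headless node 2 wins despite equal distance, matching the
 * "prefer nodes with no CPUs" policy described above.
 */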
/*
 * Build zonelists ordered by node and zones within node.
 * This results in maximum locality--normal zone overflows into local
 * DMA zone, if any--but risks exhausting DMA zone.
 */
static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
{
	int j;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[0];
	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
		;
	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/*
 * Build gfp_thisnode zonelists
 */
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
	int j;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[1];
	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/*
 * Build zonelists ordered by zone and nodes within zones.
 * This results in conserving DMA zone[s] until all Normal memory is
 * exhausted, but results in overflowing to remote node while memory
 * may still exist in local DMA zone.
 */
static int node_order[MAX_NUMNODES];

static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
{
	int pos, j, node;
	int zone_type;		/* needs to be signed */
	struct zone *z;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[0];
	pos = 0;
	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
		for (j = 0; j < nr_nodes; j++) {
			node = node_order[j];
			z = &NODE_DATA(node)->node_zones[zone_type];
			if (populated_zone(z)) {
				zoneref_set_zone(z,
					&zonelist->_zonerefs[pos++]);
				check_highest_zone(zone_type);
			}
		}
	}
	zonelist->_zonerefs[pos].zone = NULL;
	zonelist->_zonerefs[pos].zone_idx = 0;
}
static int default_zonelist_order(void)
{
	int nid, zone_type;
	unsigned long low_kmem_size, total_size;
	struct zone *z;
	int average_size;
	/*
	 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
	 * If they are really small and used heavily, the system can fall
	 * into OOM very easily.
	 * This function detects ZONE_DMA/DMA32 size and configures zone order.
	 */
	/* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
	low_kmem_size = 0;
	total_size = 0;
	for_each_online_node(nid) {
		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
			z = &NODE_DATA(nid)->node_zones[zone_type];
			if (populated_zone(z)) {
				if (zone_type < ZONE_NORMAL)
					low_kmem_size += z->present_pages;
				total_size += z->present_pages;
			}
		}
	}
	if (!low_kmem_size ||  /* there is no DMA area. */
	    low_kmem_size > total_size/2)  /* DMA/DMA32 is big. */
		return ZONELIST_ORDER_NODE;
	/*
	 * Look into each node's config.
	 * If there is a node whose DMA/DMA32 memory makes up a very large
	 * share of its local memory, NODE_ORDER may be suitable.
	 */
	average_size = total_size /
				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
	for_each_online_node(nid) {
		low_kmem_size = 0;
		total_size = 0;
		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
			z = &NODE_DATA(nid)->node_zones[zone_type];
			if (populated_zone(z)) {
				if (zone_type < ZONE_NORMAL)
					low_kmem_size += z->present_pages;
				total_size += z->present_pages;
			}
		}
		if (low_kmem_size &&
		    total_size > average_size && /* ignore small node */
		    low_kmem_size > total_size * 70/100)
			return ZONELIST_ORDER_NODE;
	}
	return ZONELIST_ORDER_ZONE;
}

static void set_zonelist_order(void)
{
	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
		current_zonelist_order = default_zonelist_order();
	else
		current_zonelist_order = user_zonelist_order;
}
static void build_zonelists(pg_data_t *pgdat)
{
	int j, node, load;
	enum zone_type i;
	nodemask_t used_mask;
	int local_node, prev_node;
	struct zonelist *zonelist;
	int order = current_zonelist_order;

	/* initialize zonelists */
	for (i = 0; i < MAX_ZONELISTS; i++) {
		zonelist = pgdat->node_zonelists + i;
		zonelist->_zonerefs[0].zone = NULL;
		zonelist->_zonerefs[0].zone_idx = 0;
	}

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = nr_online_nodes;
	prev_node = local_node;
	nodes_clear(used_mask);

	memset(node_order, 0, sizeof(node_order));
	j = 0;

	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		int distance = node_distance(local_node, node);

		/*
		 * If another node is sufficiently far away then it is better
		 * to reclaim pages in a zone before going off node.
		 */
		if (distance > RECLAIM_DISTANCE)
			zone_reclaim_mode = 1;

		/*
		 * We don't want to pressure a particular node.
		 * So adding penalty to the first node in same
		 * distance group to make it round-robin.
		 */
		if (distance != node_distance(local_node, prev_node))
			node_load[node] = load;

		prev_node = node;
		load--;
		if (order == ZONELIST_ORDER_NODE)
			build_zonelists_in_node_order(pgdat, node);
		else
			node_order[j++] = node;	/* remember order */
	}

	if (order == ZONELIST_ORDER_ZONE) {
		/* calculate node order -- i.e., DMA last! */
		build_zonelists_in_zone_order(pgdat, j);
	}

	build_thisnode_zonelists(pgdat);
}

/* Construct the zonelist performance cache - see further mmzone.h */
static void build_zonelist_cache(pg_data_t *pgdat)
{
	struct zonelist *zonelist;
	struct zonelist_cache *zlc;
	struct zoneref *z;

	zonelist = &pgdat->node_zonelists[0];
	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
	for (z = zonelist->_zonerefs; z->zone; z++)
		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
}
#else	/* CONFIG_NUMA */

static void set_zonelist_order(void)
{
	current_zonelist_order = ZONELIST_ORDER_ZONE;
}

static void build_zonelists(pg_data_t *pgdat)
{
	int node, local_node;
	enum zone_type j;
	struct zonelist *zonelist;

	local_node = pgdat->node_id;

	zonelist = &pgdat->node_zonelists[0];
	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);

	/*
	 * Now we build the zonelist so that it contains the zones
	 * of all the other nodes.
	 * We don't want to pressure a particular node, so when
	 * building the zones for node N, we make sure that the
	 * zones coming right after the local ones are those from
	 * node N+1 (modulo N)
	 */
	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
		if (!node_online(node))
			continue;
		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	}
	for (node = 0; node < local_node; node++) {
		if (!node_online(node))
			continue;
		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	}

	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
static void build_zonelist_cache(pg_data_t *pgdat)
{
	pgdat->node_zonelists[0].zlcache_ptr = NULL;
}

#endif	/* CONFIG_NUMA */
/* return value is int just for stop_machine() */
static int __build_all_zonelists(void *dummy)
{
	int nid;

#ifdef CONFIG_NUMA
	memset(node_load, 0, sizeof(node_load));
#endif
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);

		build_zonelists(pgdat);
		build_zonelist_cache(pgdat);
	}
	return 0;
}

void build_all_zonelists(void)
{
	set_zonelist_order();

	if (system_state == SYSTEM_BOOTING) {
		__build_all_zonelists(NULL);
		mminit_verify_zonelist();
		cpuset_init_current_mems_allowed();
	} else {
		/* we have to stop all cpus to guarantee there is no user
		   of zonelist */
		stop_machine(__build_all_zonelists, NULL, NULL);
		/* cpuset refresh routine should be here */
	}
	vm_total_pages = nr_free_pagecache_pages();
	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * more accurate, but expensive to check per-zone. This check is
	 * made on memory-hotadd so a system can start with mobility
	 * disabled and enable it later.
	 */
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
	else
		page_group_by_mobility_disabled = 0;

	printk("Built %i zonelists in %s order, mobility grouping %s.  "
		"Total pages: %ld\n",
			nr_online_nodes,
			zonelist_order_name[current_zonelist_order],
			page_group_by_mobility_disabled ? "off" : "on",
			vm_total_pages);
#ifdef CONFIG_NUMA
	printk("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}
/*
 * Helper functions to size the waitqueue hash table.
 * Essentially these want to choose hash table sizes sufficiently
 * large so that collisions trying to wait on pages are rare.
 * But in fact, the number of active page waitqueues on typical
 * systems is ridiculously low, less than 200. So this is even
 * conservative, even though it seems large.
 *
 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 * waitqueues, i.e. the size of the waitq table given the number of pages.
 */
#define PAGES_PER_WAITQUEUE	256

#ifndef CONFIG_MEMORY_HOTPLUG
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;

	while (size < pages)
		size <<= 1;

	/*
	 * Once we have dozens or even hundreds of threads sleeping
	 * on IO we've got bigger problems than wait queue collision.
	 * Limit the size of the wait table to a reasonable size.
	 */
	size = min(size, 4096UL);

	return max(size, 4UL);
}
#else
/*
 * A zone's size might be changed by hot-add, so it is not possible to determine
 * a suitable size for its wait_table.  So we use the maximum size now.
 *
 * The max wait table size = 4096 x sizeof(wait_queue_head_t), i.e.:
 *
 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
 *
 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
 * or more by the traditional way.  (See above.)  It equals:
 *
 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
 *    powerpc (64K page size)             : =  (32G +16M)byte.
 */
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	return 4096UL;
}
#endif

/*
 * This is an integer logarithm so that shifts can be used later
 * to extract the more random high bits from the multiplicative
 * hash function before the remainder is taken.
 */
static inline unsigned long wait_table_bits(unsigned long size)
{
	return ffz(~size);
}
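
/*
 * Worked example (illustrative): a 1 GiB zone with 4K pages spans
 * 262144 pages. wait_table_hash_nr_entries(262144) divides by
 * PAGES_PER_WAITQUEUE (256) to get 1024, which is already a power of
 * two, so the table gets 1024 entries; wait_table_bits(1024) is then
 * ffz(~1024) = 10, the number of hash bits used to index it.
 */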
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/*
 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
 * of blocks reserved is based on min_wmark_pages(zone). The memory within
 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
 * higher will lead to a bigger reserve which will get freed as contiguous
 * blocks as reclaim kicks in.
 */
static void setup_zone_migrate_reserve(struct zone *zone)
{
	unsigned long start_pfn, pfn, end_pfn;
	struct page *page;
	unsigned long block_migratetype;
	int reserve;

	/* Get the start pfn, end pfn and the number of blocks to reserve */
	start_pfn = zone->zone_start_pfn;
	end_pfn = start_pfn + zone->spanned_pages;
	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
							pageblock_order;

	/*
	 * Reserve blocks are generally in place to help high-order atomic
	 * allocations that are short-lived. A min_free_kbytes value that
	 * would result in more than 2 reserve blocks for atomic allocations
	 * is assumed to be in place to help anti-fragmentation for the
	 * future allocation of hugepages at runtime.
	 */
	reserve = min(2, reserve);

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		/* Watch out for overlapping nodes */
		if (page_to_nid(page) != zone_to_nid(zone))
			continue;

		/* Blocks with reserved pages will never be freed, skip them */
		if (PageReserved(page))
			continue;

		block_migratetype = get_pageblock_migratetype(page);

		/* If this block is reserved, account for it */
		if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
			reserve--;
			continue;
		}

		/* Suitable for reserving if this block is movable */
		if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
			set_pageblock_migratetype(page, MIGRATE_RESERVE);
			move_freepages_block(zone, page, MIGRATE_RESERVE);
			reserve--;
			continue;
		}

		/*
		 * If the reserve is met and this is a previously reserved
		 * block, take it back.
		 */
		if (block_migratetype == MIGRATE_RESERVE) {
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
			move_freepages_block(zone, page, MIGRATE_MOVABLE);
		}
	}
}
  2530. /*
  2531. * Initially all pages are reserved - free ones are freed
  2532. * up by free_all_bootmem() once the early boot process is
  2533. * done. Non-atomic initialization, single-pass.
  2534. */
  2535. void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
  2536. unsigned long start_pfn, enum memmap_context context)
  2537. {
  2538. struct page *page;
  2539. unsigned long end_pfn = start_pfn + size;
  2540. unsigned long pfn;
  2541. struct zone *z;
  2542. if (highest_memmap_pfn < end_pfn - 1)
  2543. highest_memmap_pfn = end_pfn - 1;
  2544. z = &NODE_DATA(nid)->node_zones[zone];
  2545. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  2546. /*
  2547. * There can be holes in boot-time mem_map[]s
  2548. * handed to this function. They do not
  2549. * exist on hotplugged memory.
  2550. */
  2551. if (context == MEMMAP_EARLY) {
  2552. if (!early_pfn_valid(pfn))
  2553. continue;
  2554. if (!early_pfn_in_nid(pfn, nid))
  2555. continue;
  2556. }
  2557. page = pfn_to_page(pfn);
  2558. set_page_links(page, zone, nid, pfn);
  2559. mminit_verify_page_links(page, zone, nid, pfn);
  2560. init_page_count(page);
  2561. reset_page_mapcount(page);
  2562. SetPageReserved(page);
  2563. /*
  2564. * Mark the block movable so that blocks are reserved for
  2565. * movable at startup. This will force kernel allocations
  2566. * to reserve their blocks rather than leaking throughout
  2567. * the address space during boot when many long-lived
  2568. * kernel allocations are made. Later some blocks near
  2569. * the start are marked MIGRATE_RESERVE by
  2570. * setup_zone_migrate_reserve()
  2571. *
  2572. * bitmap is created for zone's valid pfn range. but memmap
  2573. * can be created for invalid pages (for alignment)
  2574. * check here not to call set_pageblock_migratetype() against
  2575. * pfn out of zone.
  2576. */
  2577. if ((z->zone_start_pfn <= pfn)
  2578. && (pfn < z->zone_start_pfn + z->spanned_pages)
  2579. && !(pfn & (pageblock_nr_pages - 1)))
  2580. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  2581. INIT_LIST_HEAD(&page->lru);
  2582. #ifdef WANT_PAGE_VIRTUAL
  2583. /* The shift won't overflow because ZONE_NORMAL is below 4G. */
  2584. if (!is_highmem_idx(zone))
  2585. set_page_address(page, __va(pfn << PAGE_SHIFT));
  2586. #endif
  2587. }
  2588. }
  2589. static void __meminit zone_init_free_lists(struct zone *zone)
  2590. {
  2591. int order, t;
  2592. for_each_migratetype_order(order, t) {
  2593. INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
  2594. zone->free_area[order].nr_free = 0;
  2595. }
  2596. }
  2597. #ifndef __HAVE_ARCH_MEMMAP_INIT
  2598. #define memmap_init(size, nid, zone, start_pfn) \
  2599. memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
  2600. #endif
  2601. static int zone_batchsize(struct zone *zone)
  2602. {
  2603. #ifdef CONFIG_MMU
  2604. int batch;
  2605. /*
  2606. * The per-cpu-pages pools are set to around 1000th of the
  2607. * size of the zone. But no more than 1/2 of a meg.
  2608. *
  2609. * OK, so we don't know how big the cache is. So guess.
  2610. */
  2611. batch = zone->present_pages / 1024;
  2612. if (batch * PAGE_SIZE > 512 * 1024)
  2613. batch = (512 * 1024) / PAGE_SIZE;
  2614. batch /= 4; /* We effectively *= 4 below */
  2615. if (batch < 1)
  2616. batch = 1;
  2617. /*
  2618. * Clamp the batch to a 2^n - 1 value. Having a power
  2619. * of 2 value was found to be more likely to have
  2620. * suboptimal cache aliasing properties in some cases.
  2621. *
  2622. * For example if 2 tasks are alternately allocating
  2623. * batches of pages, one task can end up with a lot
  2624. * of pages of one half of the possible page colors
  2625. * and the other with pages of the other colors.
  2626. */
  2627. batch = rounddown_pow_of_two(batch + batch/2) - 1;
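	/*
	 * Worked example (added for illustration): a zone with 65536
	 * present pages (256MB at 4KB/page) gives batch = 64, which is
	 * under the 512KB cap; batch /= 4 leaves 16, and
	 * rounddown_pow_of_two(16 + 8) - 1 == 15, so each CPU moves up
	 * to 15 pages per trip to the buddy lists.
	 */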
	return batch;

#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions.
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks.  As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
	return 0;
#endif
}

static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
	struct per_cpu_pages *pcp;
	int migratetype;

	memset(p, 0, sizeof(*p));

	pcp = &p->pcp;
	pcp->count = 0;
	pcp->high = 6 * batch;
	pcp->batch = max(1UL, 1 * batch);
	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
		INIT_LIST_HEAD(&pcp->lists[migratetype]);
}
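/*
 * Illustrative note (added): with the batch of 15 from the example above,
 * pcp->high becomes 90, so a CPU caches at most 90 pages per zone before
 * spilling them back to the buddy allocator in batches of 15.
 */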
/*
 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
 * to the value high for the pageset p.
 */
static void setup_pagelist_highmark(struct per_cpu_pageset *p,
				unsigned long high)
{
	struct per_cpu_pages *pcp;

	pcp = &p->pcp;
	pcp->high = high;
	pcp->batch = max(1UL, high/4);
	if ((high/4) > (PAGE_SHIFT * 8))
		pcp->batch = PAGE_SHIFT * 8;
}
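/*
 * Worked example (added for illustration): with PAGE_SHIFT == 12 the
 * batch cap is 96 pages, so high == 1000 yields batch = min(250, 96) =
 * 96, while high == 200 yields batch = 50.
 */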
#ifdef CONFIG_NUMA
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * Some NUMA counter updates may also be caught by the boot pagesets.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static struct per_cpu_pageset boot_pageset[NR_CPUS];

/*
 * Dynamically allocate memory for the
 * per cpu pageset array in struct zone.
 */
static int __cpuinit process_zones(int cpu)
{
	struct zone *zone, *dzone;
	int node = cpu_to_node(cpu);

	node_set_state(node, N_CPU);	/* this node has a cpu */

	for_each_populated_zone(zone) {
		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
					 GFP_KERNEL, node);
		if (!zone_pcp(zone, cpu))
			goto bad;

		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));

		if (percpu_pagelist_fraction)
			setup_pagelist_highmark(zone_pcp(zone, cpu),
			    (zone->present_pages / percpu_pagelist_fraction));
	}

	return 0;
bad:
	for_each_zone(dzone) {
		if (!populated_zone(dzone))
			continue;
		if (dzone == zone)
			break;
		kfree(zone_pcp(dzone, cpu));
		zone_pcp(dzone, cpu) = &boot_pageset[cpu];
	}
	return -ENOMEM;
}

static inline void free_zone_pagesets(int cpu)
{
	struct zone *zone;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);

		/* Free per_cpu_pageset if it is slab allocated */
		if (pset != &boot_pageset[cpu])
			kfree(pset);
		zone_pcp(zone, cpu) = &boot_pageset[cpu];
	}
}

static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	int cpu = (long)hcpu;
	int ret = NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (process_zones(cpu))
			ret = NOTIFY_BAD;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_zone_pagesets(cpu);
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block __cpuinitdata pageset_notifier =
	{ &pageset_cpuup_callback, NULL, 0 };

void __init setup_per_cpu_pageset(void)
{
	int err;

	/* Initialize per_cpu_pageset for cpu 0.
	 * A cpuup callback will do this for every cpu
	 * as it comes online.
	 */
	err = process_zones(smp_processor_id());
	BUG_ON(err);
	register_cpu_notifier(&pageset_notifier);
}
#endif

static noinline __init_refok
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
	int i;
	struct pglist_data *pgdat = zone->zone_pgdat;
	size_t alloc_size;

	/*
	 * The per-page waitqueue mechanism uses hashed waitqueues
	 * per zone.
	 */
	zone->wait_table_hash_nr_entries =
		 wait_table_hash_nr_entries(zone_size_pages);
	zone->wait_table_bits =
		 wait_table_bits(zone->wait_table_hash_nr_entries);
	alloc_size = zone->wait_table_hash_nr_entries
					* sizeof(wait_queue_head_t);

	if (!slab_is_available()) {
		zone->wait_table = (wait_queue_head_t *)
			alloc_bootmem_node(pgdat, alloc_size);
	} else {
		/*
		 * This case means that a zone whose size was 0 gets new memory
		 * via memory hot-add.
		 * But it may be the case that a new node was hot-added.  In
		 * this case vmalloc() will not be able to use this new node's
		 * memory - this wait_table must be initialized to use this new
		 * node itself as well.
		 * To use this new node's memory, further consideration will be
		 * necessary.
		 */
		zone->wait_table = vmalloc(alloc_size);
	}
	if (!zone->wait_table)
		return -ENOMEM;

	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
		init_waitqueue_head(zone->wait_table + i);

	return 0;
}
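/*
 * Illustrative note (added): with the 1024-entry example above,
 * alloc_size is 1024 * sizeof(wait_queue_head_t).  The table comes from
 * bootmem before the slab allocator is up, and from vmalloc() for zones
 * populated later by memory hot-add.
 */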
static int __zone_pcp_update(void *data)
{
	struct zone *zone = data;
	int cpu;
	unsigned long batch = zone_batchsize(zone), flags;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		pset = zone_pcp(zone, cpu);
		pcp = &pset->pcp;

		local_irq_save(flags);
		free_pcppages_bulk(zone, pcp->count, pcp);
		setup_pageset(pset, batch);
		local_irq_restore(flags);
	}
	return 0;
}

void zone_pcp_update(struct zone *zone)
{
	stop_machine(__zone_pcp_update, zone, NULL);
}

static __meminit void zone_pcp_init(struct zone *zone)
{
	int cpu;
	unsigned long batch = zone_batchsize(zone);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
#ifdef CONFIG_NUMA
		/* Early boot. Slab allocator not functional yet */
		zone_pcp(zone, cpu) = &boot_pageset[cpu];
		setup_pageset(&boot_pageset[cpu], 0);
#else
		setup_pageset(zone_pcp(zone, cpu), batch);
#endif
	}
	if (zone->present_pages)
		printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
			zone->name, zone->present_pages, batch);
}

__meminit int init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size,
					enum memmap_context context)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret;
	ret = zone_wait_table_init(zone, size);
	if (ret)
		return ret;
	pgdat->nr_zones = zone_idx(zone) + 1;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);

	return 0;
}

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * Basic iterator support. Return the first range of PFNs for a node.
 * Note: nid == MAX_NUMNODES returns the first region regardless of node.
 */
static int __meminit first_active_region_index_in_nid(int nid)
{
	int i;

	for (i = 0; i < nr_nodemap_entries; i++)
		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
			return i;

	return -1;
}

/*
 * Basic iterator support. Return the next active range of PFNs for a node.
 * Note: nid == MAX_NUMNODES returns the next region regardless of node.
 */
static int __meminit next_active_region_index_in_nid(int index, int nid)
{
	for (index = index + 1; index < nr_nodemap_entries; index++)
		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
			return index;

	return -1;
}

#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 * Architectures may implement their own version but if add_active_range()
 * was used and there are no special requirements, this is a convenient
 * alternative.
 */
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	int i;

	for (i = 0; i < nr_nodemap_entries; i++) {
		unsigned long start_pfn = early_node_map[i].start_pfn;
		unsigned long end_pfn = early_node_map[i].end_pfn;

		if (start_pfn <= pfn && pfn < end_pfn)
			return early_node_map[i].nid;
	}
	/* This is a memory hole */
	return -1;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	int nid;

	nid = __early_pfn_to_nid(pfn);
	if (nid >= 0)
		return nid;
	/* just returns 0 */
	return 0;
}

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	int nid;

	nid = __early_pfn_to_nid(pfn);
	if (nid >= 0 && nid != node)
		return false;
	return true;
}
#endif

/* Basic iterator support to walk early_node_map[] */
#define for_each_active_range_index_in_nid(i, nid) \
	for (i = first_active_region_index_in_nid(nid); i != -1; \
				i = next_active_region_index_in_nid(i, nid))

/**
 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling free_bootmem() manually.
 */
void __init free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn)
{
	int i;

	for_each_active_range_index_in_nid(i, nid) {
		unsigned long size_pages = 0;
		unsigned long end_pfn = early_node_map[i].end_pfn;

		if (early_node_map[i].start_pfn >= max_low_pfn)
			continue;

		if (end_pfn > max_low_pfn)
			end_pfn = max_low_pfn;

		size_pages = end_pfn - early_node_map[i].start_pfn;
		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
				PFN_PHYS(early_node_map[i].start_pfn),
				size_pages << PAGE_SHIFT);
	}
}

void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
{
	int i;
	int ret;

	for_each_active_range_index_in_nid(i, nid) {
		ret = work_fn(early_node_map[i].start_pfn,
			      early_node_map[i].end_pfn, data);
		if (ret)
			break;
	}
}

/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling memory_present() manually.
 */
void __init sparse_memory_present_with_active_regions(int nid)
{
	int i;

	for_each_active_range_index_in_nid(i, nid)
		memory_present(early_node_map[i].nid,
				early_node_map[i].start_pfn,
				early_node_map[i].end_pfn);
}

/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by an arch calling add_active_range(). If called for a node
 * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
 */
void __meminit get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	int i;
	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_active_range_index_in_nid(i, nid) {
		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
	}

	if (*start_pfn == -1UL)
		*start_pfn = 0;
}

/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonically
 * increasing memory addresses so that the "highest" populated zone is used.
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;
	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}

/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independently of the architecture. Unlike the other
 * zones, the starting point for ZONE_MOVABLE is not fixed. It may be
 * different in each node depending on the size of each node and how evenly
 * kernelcore is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory
 * addresses.
 */
static void __meminit adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
				*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}

/*
 * Return the number of pages a zone spans in a node, including holes:
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *ignored)
{
	unsigned long node_start_pfn, node_end_pfn;
	unsigned long zone_start_pfn, zone_end_pfn;

	/* Get the start and end of the node and zone */
	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
	adjust_zone_range_for_zone_movable(nid, zone_type,
				node_start_pfn, node_end_pfn,
				&zone_start_pfn, &zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
	zone_start_pfn = max(zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return zone_end_pfn - zone_start_pfn;
}
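/*
 * Worked example (added for illustration): if a node spans PFNs
 * 0x1000-0x41000 and a zone's architectural range covers PFNs
 * 0x0-0x100000, the zone boundaries are clipped to the node, so the
 * zone spans 0x41000 - 0x1000 = 0x40000 pages on that node, holes
 * included.
 */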
/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
static unsigned long __meminit __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	int i = 0;
	unsigned long prev_end_pfn = 0, hole_pages = 0;
	unsigned long start_pfn;

	/* Find the end_pfn of the first active range of pfns in the node */
	i = first_active_region_index_in_nid(nid);
	if (i == -1)
		return 0;

	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);

	/* Account for ranges before physical memory on this node */
	if (early_node_map[i].start_pfn > range_start_pfn)
		hole_pages = prev_end_pfn - range_start_pfn;

	/* Find all holes for the zone within the node */
	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {

		/* No need to continue if prev_end_pfn is outside the zone */
		if (prev_end_pfn >= range_end_pfn)
			break;

		/* Make sure the end of the zone is not within the hole */
		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
		prev_end_pfn = max(prev_end_pfn, range_start_pfn);

		/* Update the hole size count and move on */
		if (start_pfn > range_start_pfn) {
			BUG_ON(prev_end_pfn > start_pfn);
			hole_pages += start_pfn - prev_end_pfn;
		}
		prev_end_pfn = early_node_map[i].end_pfn;
	}

	/* Account for ranges past physical memory on this node */
	if (range_end_pfn > prev_end_pfn)
		hole_pages += range_end_pfn -
				max(range_start_pfn, prev_end_pfn);

	return hole_pages;
}
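/*
 * Worked example (added for illustration): with active ranges
 * [0x0, 0x100) and [0x200, 0x300) and a requested range of [0x0, 0x300),
 * the walk charges the gap [0x100, 0x200), so __absent_pages_in_range()
 * returns 0x100 hole pages.
 */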
/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
static unsigned long __meminit zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *ignored)
{
	unsigned long node_start_pfn, node_end_pfn;
	unsigned long zone_start_pfn, zone_end_pfn;

	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
							node_start_pfn);
	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
							node_end_pfn);

	adjust_zone_range_for_zone_movable(nid, zone_type,
			node_start_pfn, node_end_pfn,
			&zone_start_pfn, &zone_end_pfn);
	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}

#else
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *zones_size)
{
	return zones_size[zone_type];
}

static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
						unsigned long zone_type,
						unsigned long *zholes_size)
{
	if (!zholes_size)
		return 0;

	return zholes_size[zone_type];
}
#endif

static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	unsigned long realtotalpages, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
								zones_size);
	pgdat->node_spanned_pages = totalpages;

	realtotalpages = totalpages;
	for (i = 0; i < MAX_NR_ZONES; i++)
		realtotalpages -=
			zone_absent_pages_in_node(pgdat->node_id, i,
								zholes_size);
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
							realtotalpages);
}

#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long.
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
 * round what is now in bits to the nearest long in bits, then return it in
 * bytes.
 */
static unsigned long __init usemap_size(unsigned long zonesize)
{
	unsigned long usemapsize;

	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));

	return usemapsize / 8;
}
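/*
 * Worked example (added for illustration, assuming 4KB pages,
 * pageblock_order == 9 and NR_PAGEBLOCK_BITS == 4): a 1GB zone has
 * 262144 pages, i.e. 512 pageblocks; 512 * 4 = 2048 bits, already a
 * multiple of a 64-bit long, so usemap_size() returns 256 bytes.
 */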
static void __init setup_usemap(struct pglist_data *pgdat,
				struct zone *zone, unsigned long zonesize)
{
	unsigned long usemapsize = usemap_size(zonesize);
	zone->pageblock_flags = NULL;
	if (usemapsize)
		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
}
#else
static void inline setup_usemap(struct pglist_data *pgdat,
				struct zone *zone, unsigned long zonesize) {}
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Return a sensible default order for the pageblock size. */
static inline int pageblock_default_order(void)
{
	if (HPAGE_SHIFT > PAGE_SHIFT)
		return HUGETLB_PAGE_ORDER;

	return MAX_ORDER-1;
}

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
static inline void __init set_pageblock_order(unsigned int order)
{
	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64.
	 */
	pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * and pageblock_default_order() are unused as pageblock_order is set
 * at compile-time. See include/linux/pageblock-flags.h for the values of
 * pageblock_order based on the kernel config.
 */
static inline int pageblock_default_order(unsigned int order)
{
	return MAX_ORDER-1;
}
#define set_pageblock_order(x)	do {} while (0)

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
static void __paginginit free_area_init_core(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	enum zone_type j;
	int nid = pgdat->node_id;
	unsigned long zone_start_pfn = pgdat->node_start_pfn;
	int ret;

	pgdat_resize_init(pgdat);
	pgdat->nr_zones = 0;
	init_waitqueue_head(&pgdat->kswapd_wait);
	pgdat->kswapd_max_order = 0;
	pgdat_page_cgroup_init(pgdat);

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, realsize, memmap_pages;
		enum lru_list l;

		size = zone_spanned_pages_in_node(nid, j, zones_size);
		realsize = size - zone_absent_pages_in_node(nid, j,
								zholes_size);

		/*
		 * Adjust realsize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations.
		 */
		memmap_pages =
			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
		if (realsize >= memmap_pages) {
			realsize -= memmap_pages;
			if (memmap_pages)
				printk(KERN_DEBUG
				       " %s zone: %lu pages used for memmap\n",
				       zone_names[j], memmap_pages);
		} else
			printk(KERN_WARNING
				" %s zone: %lu pages exceeds realsize %lu\n",
				zone_names[j], memmap_pages, realsize);

		/* Account for reserved pages */
		if (j == 0 && realsize > dma_reserve) {
			realsize -= dma_reserve;
			printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
					zone_names[0], dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += realsize;
		nr_all_pages += realsize;

		zone->spanned_pages = size;
		zone->present_pages = realsize;
#ifdef CONFIG_NUMA
		zone->node = nid;
		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
						/ 100;
		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
#endif
		zone->name = zone_names[j];
		spin_lock_init(&zone->lock);
		spin_lock_init(&zone->lru_lock);
		zone_seqlock_init(zone);
		zone->zone_pgdat = pgdat;
		zone->prev_priority = DEF_PRIORITY;

		zone_pcp_init(zone);
		for_each_lru(l) {
			INIT_LIST_HEAD(&zone->lru[l].list);
			zone->reclaim_stat.nr_saved_scan[l] = 0;
		}
		zone->reclaim_stat.recent_rotated[0] = 0;
		zone->reclaim_stat.recent_rotated[1] = 0;
		zone->reclaim_stat.recent_scanned[0] = 0;
		zone->reclaim_stat.recent_scanned[1] = 0;
		zap_zone_vm_stats(zone);
		zone->flags = 0;
		if (!size)
			continue;

		set_pageblock_order(pageblock_default_order());
		setup_usemap(pgdat, zone, size);
		ret = init_currently_empty_zone(zone, zone_start_pfn,
						size, MEMMAP_EARLY);
		BUG_ON(ret);
		memmap_init(size, nid, j, zone_start_pfn);
		zone_start_pfn += size;
	}
}

static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, start, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size = (end - start) * sizeof(struct page);
		map = alloc_remap(pgdat->node_id, size);
		if (!map)
			map = alloc_bootmem_node(pgdat, size);
		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
	}
#ifndef CONFIG_NEED_MULTIPLE_NODES
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
	}
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}

void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
		unsigned long node_start_pfn, unsigned long *zholes_size)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	calculate_node_totalpages(pgdat, zones_size, zholes_size);

	alloc_node_mem_map(pgdat);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
		nid, (unsigned long)pgdat,
		(unsigned long)pgdat->node_mem_map);
#endif

	free_area_init_core(pgdat, zones_size, zholes_size);
}

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP

#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
static void __init setup_nr_node_ids(void)
{
	unsigned int node;
	unsigned int highest = 0;

	for_each_node_mask(node, node_possible_map)
		highest = node;
	nr_node_ids = highest + 1;
}
#else
static inline void setup_nr_node_ids(void)
{
}
#endif

/**
 * add_active_range - Register a range of PFNs backed by physical memory
 * @nid: The node ID the range resides on
 * @start_pfn: The start PFN of the available physical memory
 * @end_pfn: The end PFN of the available physical memory
 *
 * These ranges are stored in an early_node_map[] and later used by
 * free_area_init_nodes() to calculate zone sizes and holes. If the
 * range spans a memory hole, it is up to the architecture to ensure
 * the memory is not freed by the bootmem allocator. If possible
 * the range being registered will be merged with existing ranges.
 */
void __init add_active_range(unsigned int nid, unsigned long start_pfn,
						unsigned long end_pfn)
{
	int i;

	mminit_dprintk(MMINIT_TRACE, "memory_register",
			"Entering add_active_range(%d, %#lx, %#lx) "
			"%d entries of %d used\n",
			nid, start_pfn, end_pfn,
			nr_nodemap_entries, MAX_ACTIVE_REGIONS);

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);

	/* Merge with existing active regions if possible */
	for (i = 0; i < nr_nodemap_entries; i++) {
		if (early_node_map[i].nid != nid)
			continue;

		/* Skip if an existing region covers this new one */
		if (start_pfn >= early_node_map[i].start_pfn &&
				end_pfn <= early_node_map[i].end_pfn)
			return;

		/* Merge forward if suitable */
		if (start_pfn <= early_node_map[i].end_pfn &&
				end_pfn > early_node_map[i].end_pfn) {
			early_node_map[i].end_pfn = end_pfn;
			return;
		}

		/* Merge backward if suitable */
		if (start_pfn < early_node_map[i].end_pfn &&
				end_pfn >= early_node_map[i].start_pfn) {
			early_node_map[i].start_pfn = start_pfn;
			return;
		}
	}

	/* Check that early_node_map is large enough */
	if (i >= MAX_ACTIVE_REGIONS) {
		printk(KERN_CRIT "More than %d memory regions, truncating\n",
							MAX_ACTIVE_REGIONS);
		return;
	}

	early_node_map[i].nid = nid;
	early_node_map[i].start_pfn = start_pfn;
	early_node_map[i].end_pfn = end_pfn;
	nr_nodemap_entries = i + 1;
}
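/*
 * Worked example (added for illustration): if early_node_map[] already
 * holds [0x100, 0x200) for node 0, add_active_range(0, 0x180, 0x280)
 * takes the "merge forward" branch and the region becomes [0x100, 0x280);
 * a completely disjoint range would instead append a new entry.
 */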
/**
 * remove_active_range - Shrink an existing registered range of PFNs
 * @nid: The node id the range is on that should be shrunk
 * @start_pfn: The new PFN of the range
 * @end_pfn: The new PFN of the range
 *
 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
 * The map is kept near the end of the physical page range that has already
 * been registered. This function allows an arch to shrink an existing
 * registered range.
 */
void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
				unsigned long end_pfn)
{
	int i, j;
	int removed = 0;

	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
			  nid, start_pfn, end_pfn);

	/* Find the old active region end and shrink */
	for_each_active_range_index_in_nid(i, nid) {
		if (early_node_map[i].start_pfn >= start_pfn &&
		    early_node_map[i].end_pfn <= end_pfn) {
			/* clear it */
			early_node_map[i].start_pfn = 0;
			early_node_map[i].end_pfn = 0;
			removed = 1;
			continue;
		}
		if (early_node_map[i].start_pfn < start_pfn &&
		    early_node_map[i].end_pfn > start_pfn) {
			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
			early_node_map[i].end_pfn = start_pfn;
			if (temp_end_pfn > end_pfn)
				add_active_range(nid, end_pfn, temp_end_pfn);
			continue;
		}
		if (early_node_map[i].start_pfn >= start_pfn &&
		    early_node_map[i].end_pfn > end_pfn &&
		    early_node_map[i].start_pfn < end_pfn) {
			early_node_map[i].start_pfn = end_pfn;
			continue;
		}
	}

	if (!removed)
		return;

	/* remove the blank ones */
	for (i = nr_nodemap_entries - 1; i > 0; i--) {
		if (early_node_map[i].nid != nid)
			continue;
		if (early_node_map[i].end_pfn)
			continue;
		/* we found it, get rid of it */
		for (j = i; j < nr_nodemap_entries - 1; j++)
			memcpy(&early_node_map[j], &early_node_map[j+1],
				sizeof(early_node_map[j]));
		j = nr_nodemap_entries - 1;
		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
		nr_nodemap_entries--;
	}
}

/**
 * remove_all_active_ranges - Remove all currently registered regions
 *
 * During discovery, it may be found that a table like SRAT is invalid
 * and an alternative discovery method must be used. This function removes
 * all currently registered regions.
 */
void __init remove_all_active_ranges(void)
{
	memset(early_node_map, 0, sizeof(early_node_map));
	nr_nodemap_entries = 0;
}

/* Compare two active node_active_regions */
static int __init cmp_node_active_region(const void *a, const void *b)
{
	struct node_active_region *arange = (struct node_active_region *)a;
	struct node_active_region *brange = (struct node_active_region *)b;

	/* Done this way to avoid overflows */
	if (arange->start_pfn > brange->start_pfn)
		return 1;
	if (arange->start_pfn < brange->start_pfn)
		return -1;

	return 0;
}

/* sort the node_map by start_pfn */
static void __init sort_node_map(void)
{
	sort(early_node_map, (size_t)nr_nodemap_entries,
			sizeof(struct node_active_region),
			cmp_node_active_region, NULL);
}

/* Find the lowest pfn for a node */
static unsigned long __init find_min_pfn_for_node(int nid)
{
	int i;
	unsigned long min_pfn = ULONG_MAX;

	/* Assuming a sorted map, the first range found has the starting pfn */
	for_each_active_range_index_in_nid(i, nid)
		min_pfn = min(min_pfn, early_node_map[i].start_pfn);

	if (min_pfn == ULONG_MAX) {
		printk(KERN_WARNING
			"Could not find start_pfn for node %d\n", nid);
		return 0;
	}

	return min_pfn;
}

/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * add_active_range().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
{
	return find_min_pfn_for_node(MAX_NUMNODES);
}

/*
 * early_calculate_totalpages()
 * Sum pages in active regions for the movable zone.
 * Populate N_HIGH_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	int i;
	unsigned long totalpages = 0;

	for (i = 0; i < nr_nodemap_entries; i++) {
		unsigned long pages = early_node_map[i].end_pfn -
						early_node_map[i].start_pfn;
		totalpages += pages;
		if (pages)
			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
	}
	return totalpages;
}

/*
 * Find the PFN the Movable zone begins at in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others.
 */
static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrowing the nodemask */
	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);

	/*
	 * If movablecore was specified, calculate what size of
	 * kernelcore that corresponds to so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user.
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
	if (!required_kernelcore)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	find_usable_zone_for_movable();
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_HIGH_MEMORY) {
		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel.
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE.
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_active_range_index_in_nid(i, nid) {
			unsigned long start_pfn, end_pfn;
			unsigned long size_pages;

			start_pfn = max(early_node_map[i].start_pfn,
						zone_movable_pfn[nid]);
			end_pfn = early_node_map[i].end_pfn;
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;
				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here.
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore.
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied.
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied.
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

out:
	/* restore the node_state */
	node_states[N_HIGH_MEMORY] = saved_node_state;
}
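/*
 * Worked example (added for illustration, assuming 4KB pages): on a
 * machine with 4 nodes of 1GB each and kernelcore=1G on the command
 * line, required_kernelcore is 262144 pages, so each node keeps roughly
 * 65536 pages (256MB) for kernel allocations and zone_movable_pfn[nid]
 * is set so that the remaining ~768MB per node lands in ZONE_MOVABLE.
 */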
  3724. /* Any regular memory on that node ? */
  3725. static void check_for_regular_memory(pg_data_t *pgdat)
  3726. {
  3727. #ifdef CONFIG_HIGHMEM
  3728. enum zone_type zone_type;
  3729. for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
  3730. struct zone *zone = &pgdat->node_zones[zone_type];
  3731. if (zone->present_pages)
  3732. node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
  3733. }
  3734. #endif
  3735. }
  3736. /**
  3737. * free_area_init_nodes - Initialise all pg_data_t and zone data
  3738. * @max_zone_pfn: an array of max PFNs for each zone
  3739. *
  3740. * This will call free_area_init_node() for each active node in the system.
  3741. * Using the page ranges provided by add_active_range(), the size of each
  3742. * zone in each node and their holes is calculated. If the maximum PFN
  3743. * between two adjacent zones match, it is assumed that the zone is empty.
  3744. * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
  3745. * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
  3746. * starts where the previous one ended. For example, ZONE_DMA32 starts
  3747. * at arch_max_dma_pfn.
  3748. */
  3749. void __init free_area_init_nodes(unsigned long *max_zone_pfn)
  3750. {
  3751. unsigned long nid;
  3752. int i;
  3753. /* Sort early_node_map as initialisation assumes it is sorted */
  3754. sort_node_map();
  3755. /* Record where the zone boundaries are */
  3756. memset(arch_zone_lowest_possible_pfn, 0,
  3757. sizeof(arch_zone_lowest_possible_pfn));
  3758. memset(arch_zone_highest_possible_pfn, 0,
  3759. sizeof(arch_zone_highest_possible_pfn));
  3760. arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
  3761. arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
  3762. for (i = 1; i < MAX_NR_ZONES; i++) {
  3763. if (i == ZONE_MOVABLE)
  3764. continue;
  3765. arch_zone_lowest_possible_pfn[i] =
  3766. arch_zone_highest_possible_pfn[i-1];
  3767. arch_zone_highest_possible_pfn[i] =
  3768. max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
  3769. }
  3770. arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
  3771. arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
  3772. /* Find the PFNs that ZONE_MOVABLE begins at in each node */
  3773. memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
  3774. find_zone_movable_pfns_for_nodes(zone_movable_pfn);
  3775. /* Print out the zone ranges */
  3776. printk("Zone PFN ranges:\n");
  3777. for (i = 0; i < MAX_NR_ZONES; i++) {
  3778. if (i == ZONE_MOVABLE)
  3779. continue;
  3780. printk(" %-8s %0#10lx -> %0#10lx\n",
  3781. zone_names[i],
  3782. arch_zone_lowest_possible_pfn[i],
  3783. arch_zone_highest_possible_pfn[i]);
  3784. }
  3785. /* Print out the PFNs ZONE_MOVABLE begins at in each node */
  3786. printk("Movable zone start PFN for each node\n");
  3787. for (i = 0; i < MAX_NUMNODES; i++) {
  3788. if (zone_movable_pfn[i])
  3789. printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
  3790. }
  3791. /* Print out the early_node_map[] */
  3792. printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
  3793. for (i = 0; i < nr_nodemap_entries; i++)
  3794. printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
  3795. early_node_map[i].start_pfn,
  3796. early_node_map[i].end_pfn);
  3797. /* Initialise every node */
  3798. mminit_verify_pageflags_layout();
  3799. setup_nr_node_ids();
  3800. for_each_online_node(nid) {
  3801. pg_data_t *pgdat = NODE_DATA(nid);
  3802. free_area_init_node(nid, NULL,
  3803. find_min_pfn_for_node(nid), NULL);
  3804. /* Any memory on that node */
  3805. if (pgdat->node_present_pages)
  3806. node_set_state(nid, N_HIGH_MEMORY);
  3807. check_for_regular_memory(pgdat);
  3808. }
  3809. }
  3810. static int __init cmdline_parse_core(char *p, unsigned long *core)
  3811. {
  3812. unsigned long long coremem;
  3813. if (!p)
  3814. return -EINVAL;
  3815. coremem = memparse(p, &p);
  3816. *core = coremem >> PAGE_SHIFT;
  3817. /* Paranoid check that UL is enough for the coremem value */
  3818. WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
  3819. return 0;
  3820. }
  3821. /*
  3822. * kernelcore=size sets the amount of memory for use for allocations that
  3823. * cannot be reclaimed or migrated.
  3824. */
  3825. static int __init cmdline_parse_kernelcore(char *p)
  3826. {
  3827. return cmdline_parse_core(p, &required_kernelcore);
  3828. }
  3829. /*
  3830. * movablecore=size sets the amount of memory for use for allocations that
  3831. * can be reclaimed or migrated.
  3832. */
  3833. static int __init cmdline_parse_movablecore(char *p)
  3834. {
  3835. return cmdline_parse_core(p, &required_movablecore);
  3836. }
  3837. early_param("kernelcore", cmdline_parse_kernelcore);
  3838. early_param("movablecore", cmdline_parse_movablecore);
  3839. #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
  3840. /**
  3841. * set_dma_reserve - set the specified number of pages reserved in the first zone
  3842. * @new_dma_reserve: The number of pages to mark reserved
  3843. *
  3844. * The per-cpu batchsize and zone watermarks are determined by present_pages.
  3845. * In the DMA zone, a significant percentage may be consumed by kernel image
  3846. * and other unfreeable allocations which can skew the watermarks badly. This
  3847. * function may optionally be used to account for unfreeable pages in the
  3848. * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
  3849. * smaller per-cpu batchsize.
  3850. */
  3851. void __init set_dma_reserve(unsigned long new_dma_reserve)
  3852. {
  3853. dma_reserve = new_dma_reserve;
  3854. }
  3855. #ifndef CONFIG_NEED_MULTIPLE_NODES
  3856. struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
  3857. EXPORT_SYMBOL(contig_page_data);
  3858. #endif
  3859. void __init free_area_init(unsigned long *zones_size)
  3860. {
  3861. free_area_init_node(0, zones_size,
  3862. __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
  3863. }
  3864. static int page_alloc_cpu_notify(struct notifier_block *self,
  3865. unsigned long action, void *hcpu)
  3866. {
  3867. int cpu = (unsigned long)hcpu;
  3868. if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
  3869. drain_pages(cpu);
  3870. /*
  3871. * Spill the event counters of the dead processor
  3872. * into the current processors event counters.
  3873. * This artificially elevates the count of the current
  3874. * processor.
  3875. */
  3876. vm_events_fold_cpu(cpu);
  3877. /*
  3878. * Zero the differential counters of the dead processor
  3879. * so that the vm statistics are consistent.
  3880. *
  3881. * This is only okay since the processor is dead and cannot
  3882. * race with what we are doing.
  3883. */
  3884. refresh_cpu_vm_stats(cpu);
  3885. }
  3886. return NOTIFY_OK;
  3887. }
  3888. void __init page_alloc_init(void)
  3889. {
  3890. hotcpu_notifier(page_alloc_cpu_notify, 0);
  3891. }
  3892. /*
  3893. * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
  3894. * or min_free_kbytes changes.
  3895. */
  3896. static void calculate_totalreserve_pages(void)
  3897. {
  3898. struct pglist_data *pgdat;
  3899. unsigned long reserve_pages = 0;
  3900. enum zone_type i, j;
  3901. for_each_online_pgdat(pgdat) {
  3902. for (i = 0; i < MAX_NR_ZONES; i++) {
  3903. struct zone *zone = pgdat->node_zones + i;
  3904. unsigned long max = 0;
  3905. /* Find valid and maximum lowmem_reserve in the zone */
  3906. for (j = i; j < MAX_NR_ZONES; j++) {
  3907. if (zone->lowmem_reserve[j] > max)
  3908. max = zone->lowmem_reserve[j];
  3909. }
  3910. /* we treat the high watermark as reserved pages. */
  3911. max += high_wmark_pages(zone);
  3912. if (max > zone->present_pages)
  3913. max = zone->present_pages;
  3914. reserve_pages += max;
  3915. }
  3916. }
  3917. totalreserve_pages = reserve_pages;
  3918. }
/*
 * setup_per_zone_lowmem_reserve - called whenever
 *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *      has a correct pages reserved value, so an adequate number of
 *      pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
        struct pglist_data *pgdat;
        enum zone_type j, idx;

        for_each_online_pgdat(pgdat) {
                for (j = 0; j < MAX_NR_ZONES; j++) {
                        struct zone *zone = pgdat->node_zones + j;
                        unsigned long present_pages = zone->present_pages;

                        zone->lowmem_reserve[j] = 0;

                        idx = j;
                        while (idx) {
                                struct zone *lower_zone;

                                idx--;

                                if (sysctl_lowmem_reserve_ratio[idx] < 1)
                                        sysctl_lowmem_reserve_ratio[idx] = 1;

                                lower_zone = pgdat->node_zones + idx;
                                lower_zone->lowmem_reserve[j] = present_pages /
                                        sysctl_lowmem_reserve_ratio[idx];
                                present_pages += lower_zone->present_pages;
                        }
                }
        }

        /* update totalreserve_pages */
        calculate_totalreserve_pages();
}
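/*
 * Editor's note: a worked example with made-up sizes (the ratios are the
 * usual defaults, e.g. 256 for DMA and 32 for NORMAL).  With
 *
 *      ZONE_DMA:       4096 present pages
 *      ZONE_NORMAL:  225280 present pages
 *      ZONE_HIGHMEM:  32768 present pages
 *
 * the loop computes, for HIGHMEM allocations,
 *
 *      NORMAL->lowmem_reserve[HIGHMEM] = 32768 / 32             = 1024
 *      DMA->lowmem_reserve[HIGHMEM]    = (32768 + 225280) / 256 = 1008
 *
 * and, for NORMAL allocations,
 *
 *      DMA->lowmem_reserve[NORMAL]     = 225280 / 256           = 880
 *
 * i.e. each lower zone defends itself against allocations that could
 * have been satisfied from a higher zone instead.
 */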
/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long lowmem_pages = 0;
        struct zone *zone;
        unsigned long flags;

        /* Calculate total number of !ZONE_HIGHMEM pages */
        for_each_zone(zone) {
                if (!is_highmem(zone))
                        lowmem_pages += zone->present_pages;
        }

        for_each_zone(zone) {
                u64 tmp;

                spin_lock_irqsave(&zone->lock, flags);
                tmp = (u64)pages_min * zone->present_pages;
                do_div(tmp, lowmem_pages);
                if (is_highmem(zone)) {
                        /*
                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
                         * need highmem pages, so cap pages_min to a small
                         * value here.
                         *
                         * The (WMARK_HIGH-WMARK_LOW) and (WMARK_LOW-WMARK_MIN)
                         * deltas control async page reclaim, and so should
                         * not be capped for highmem.
                         */
                        int min_pages;

                        min_pages = zone->present_pages / 1024;
                        if (min_pages < SWAP_CLUSTER_MAX)
                                min_pages = SWAP_CLUSTER_MAX;
                        if (min_pages > 128)
                                min_pages = 128;
                        zone->watermark[WMARK_MIN] = min_pages;
                } else {
                        /*
                         * If it's a lowmem zone, reserve a number of pages
                         * proportionate to the zone's size.
                         */
                        zone->watermark[WMARK_MIN] = tmp;
                }

                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
                setup_zone_migrate_reserve(zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }

        /* update totalreserve_pages */
        calculate_totalreserve_pages();
}
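/*
 * Editor's note: a worked example with made-up sizes.  With 4K pages,
 * min_free_kbytes = 4096 gives pages_min = 4096 >> 2 = 1024 pages.
 * For a machine with a single lowmem zone of 262144 present pages
 * (all of lowmem), tmp = 1024, so
 *
 *      WMARK_MIN  = 1024
 *      WMARK_LOW  = 1024 + (1024 >> 2) = 1280
 *      WMARK_HIGH = 1024 + (1024 >> 1) = 1536
 *
 * kswapd is woken when free pages drop below WMARK_LOW and reclaims
 * until the zone is back above WMARK_HIGH.
 */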
/*
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 * INACTIVE_ANON pages on this zone's LRU, maintained by the pageout code.
 * A zone->inactive_ratio of 3 means 3:1 or 25% of the anonymous pages are
 * kept on the inactive list.
 *
 * total     target    max
 * memory    ratio     inactive anon
 * -------------------------------------
 *   10MB       1         5MB
 *  100MB       1        50MB
 *    1GB       3       250MB
 *   10GB      10       0.9GB
 *  100GB      31         3GB
 *    1TB     101        10GB
 *   10TB     320        32GB
 */
void calculate_zone_inactive_ratio(struct zone *zone)
{
        unsigned int gb, ratio;

        /* Zone size in gigabytes */
        gb = zone->present_pages >> (30 - PAGE_SHIFT);
        if (gb)
                ratio = int_sqrt(10 * gb);
        else
                ratio = 1;

        zone->inactive_ratio = ratio;
}
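/*
 * Editor's note: a worked example.  With 4K pages a 100GB zone has
 * 26214400 present pages; gb = 26214400 >> 18 = 100, so
 * ratio = int_sqrt(1000) = 31, matching the table above: roughly
 * 1 page in 32 of the anonymous working set (~3GB) is kept inactive.
 */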
static void __init setup_per_zone_inactive_ratio(void)
{
        struct zone *zone;

        for_each_zone(zone)
                calculate_zone_inactive_ratio(zone);
}
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 *    16MB:     512k
 *    32MB:     724k
 *    64MB:    1024k
 *   128MB:    1448k
 *   256MB:    2048k
 *   512MB:    2896k
 *  1024MB:    4096k
 *  2048MB:    5792k
 *  4096MB:    8192k
 *  8192MB:   11584k
 * 16384MB:   16384k
 */
static int __init init_per_zone_wmark_min(void)
{
        unsigned long lowmem_kbytes;

        lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

        min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
        if (min_free_kbytes < 128)
                min_free_kbytes = 128;
        if (min_free_kbytes > 65536)
                min_free_kbytes = 65536;
        setup_per_zone_wmarks();
        setup_per_zone_lowmem_reserve();
        setup_per_zone_inactive_ratio();
        return 0;
}
module_init(init_per_zone_wmark_min)
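/*
 * Editor's note: a worked example.  On a machine with 1GB of lowmem,
 * lowmem_kbytes = 1048576, so
 *
 *      min_free_kbytes = int_sqrt(1048576 * 16) = int_sqrt(16777216) = 4096
 *
 * matching the 1024MB row of the table above; the 128..65536 clamps
 * only bite on very small or very large machines.
 */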
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *      that we can call two helper functions whenever min_free_kbytes
 *      changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec(table, write, buffer, length, ppos);
        if (write)
                setup_per_zone_wmarks();
        return 0;
}
#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        struct zone *zone;
        int rc;

        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;

        for_each_zone(zone)
                zone->min_unmapped_pages = (zone->present_pages *
                                sysctl_min_unmapped_ratio) / 100;
        return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        struct zone *zone;
        int rc;

        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;

        for_each_zone(zone)
                zone->min_slab_pages = (zone->present_pages *
                                sysctl_min_slab_ratio) / 100;
        return 0;
}
#endif
/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *      proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *      whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks.  The lowmem reserve ratio is only meaningful
 * as a function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_minmax(table, write, buffer, length, ppos);
        setup_per_zone_lowmem_reserve();
        return 0;
}
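/*
 * Editor's note (usage, not in the original source): the ratios are
 * exposed as /proc/sys/vm/lowmem_reserve_ratio; writing new values,
 * e.g. "echo 256 32 > /proc/sys/vm/lowmem_reserve_ratio" on a
 * two-ratio configuration, lands in the handler above and re-runs
 * setup_per_zone_lowmem_reserve().
 */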
/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        struct zone *zone;
        unsigned int cpu;
        int ret;

        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (!write || (ret == -EINVAL))
                return ret;
        for_each_populated_zone(zone) {
                for_each_online_cpu(cpu) {
                        unsigned long high;
                        high = zone->present_pages / percpu_pagelist_fraction;
                        setup_pagelist_highmark(zone_pcp(zone, cpu), high);
                }
        }
        return 0;
}
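/*
 * Editor's note: a worked example with made-up sizes.  Writing 64 to
 * /proc/sys/vm/percpu_pagelist_fraction on a zone with 262144 present
 * pages sets pcp->high to 262144 / 64 = 4096 pages for every online
 * CPU's hot list; setup_pagelist_highmark() also derives pcp->batch
 * from that value.
 */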
int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
        if (!str)
                return 0;
        hashdist = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("hashdist=", set_hashdist);
#endif
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
                                     int scale,
                                     int flags,
                                     unsigned int *_hash_shift,
                                     unsigned int *_hash_mask,
                                     unsigned long limit)
{
        unsigned long long max = limit;
        unsigned long log2qty, size;
        void *table = NULL;

        /* allow the kernel cmdline to have a say */
        if (!numentries) {
                /* round applicable memory size up to nearest megabyte */
                numentries = nr_kernel_pages;
                numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
                numentries >>= 20 - PAGE_SHIFT;
                numentries <<= 20 - PAGE_SHIFT;

                /* limit to 1 bucket per 2^scale bytes of low memory */
                if (scale > PAGE_SHIFT)
                        numentries >>= (scale - PAGE_SHIFT);
                else
                        numentries <<= (PAGE_SHIFT - scale);

                /* Make sure we've got at least a 0-order allocation... */
                if (unlikely(flags & HASH_SMALL)) {
                        /* Makes no sense without HASH_EARLY */
                        WARN_ON(!(flags & HASH_EARLY));
                        if (!(numentries >> *_hash_shift)) {
                                numentries = 1UL << *_hash_shift;
                                BUG_ON(!numentries);
                        }
                } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
                        numentries = PAGE_SIZE / bucketsize;
        }
        numentries = roundup_pow_of_two(numentries);

        /* limit allocation size to 1/16 total memory by default */
        if (max == 0) {
                max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
                do_div(max, bucketsize);
        }

        if (numentries > max)
                numentries = max;

        log2qty = ilog2(numentries);

        do {
                size = bucketsize << log2qty;
                if (flags & HASH_EARLY)
                        table = alloc_bootmem_nopanic(size);
                else if (hashdist)
                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
                else {
                        /*
                         * If bucketsize is not a power of two, we may free
                         * some pages at the end of the hash table, which
                         * alloc_pages_exact() does automatically.
                         */
                        if (get_order(size) < MAX_ORDER) {
                                table = alloc_pages_exact(size, GFP_ATOMIC);
                                kmemleak_alloc(table, size, 1, GFP_ATOMIC);
                        }
                }
        } while (!table && size > PAGE_SIZE && --log2qty);

        if (!table)
                panic("Failed to allocate %s hash table\n", tablename);

        printk(KERN_INFO "%s hash table entries: %u (order: %d, %lu bytes)\n",
               tablename,
               (1U << log2qty),
               ilog2(size) - PAGE_SHIFT,
               size);

        if (_hash_shift)
                *_hash_shift = log2qty;
        if (_hash_mask)
                *_hash_mask = (1 << log2qty) - 1;

        return table;
}
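/*
 * Editor's note: an illustrative sketch of a typical caller, modelled
 * on the dentry/inode cache setup.  "My cache", my_table, my_hash_shift
 * and my_hash_mask are hypothetical names, not from the original file.
 */
#if 0
static struct hlist_head *my_table __read_mostly;
static unsigned int my_hash_shift, my_hash_mask;

static void __init my_hash_init(void)
{
        my_table = alloc_large_system_hash("My cache",
                                           sizeof(struct hlist_head),
                                           0,   /* size entries from memory */
                                           13,  /* 1 bucket per 8KB of RAM */
                                           0,   /* no HASH_EARLY/HASH_SMALL */
                                           &my_hash_shift,
                                           &my_hash_mask,
                                           0);  /* no explicit bucket limit */
}
#endif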
/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
                                                  unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
        return __pfn_to_section(pfn)->pageblock_flags;
#else
        return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
        pfn &= (PAGES_PER_SECTION-1);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
        pfn = pfn - zone->zone_start_pfn;
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
/**
 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest to retrieve
 * @end_bitidx: The last bit of interest
 * returns pageblock_bits flags
 */
unsigned long get_pageblock_flags_group(struct page *page,
                                        int start_bitidx, int end_bitidx)
{
        struct zone *zone;
        unsigned long *bitmap;
        unsigned long pfn, bitidx;
        unsigned long flags = 0;
        unsigned long value = 1;

        zone = page_zone(page);
        pfn = page_to_pfn(page);
        bitmap = get_pageblock_bitmap(zone, pfn);
        bitidx = pfn_to_bitidx(zone, pfn);

        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
                if (test_bit(bitidx + start_bitidx, bitmap))
                        flags |= value;

        return flags;
}
/**
 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest
 * @end_bitidx: The last bit of interest
 * @flags: The flags to set
 */
void set_pageblock_flags_group(struct page *page, unsigned long flags,
                               int start_bitidx, int end_bitidx)
{
        struct zone *zone;
        unsigned long *bitmap;
        unsigned long pfn, bitidx;
        unsigned long value = 1;

        zone = page_zone(page);
        pfn = page_to_pfn(page);
        bitmap = get_pageblock_bitmap(zone, pfn);
        bitidx = pfn_to_bitidx(zone, pfn);
        VM_BUG_ON(pfn < zone->zone_start_pfn);
        VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);

        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
                if (flags & value)
                        __set_bit(bitidx + start_bitidx, bitmap);
                else
                        __clear_bit(bitidx + start_bitidx, bitmap);
}
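/*
 * Editor's note: the usual consumers of the two helpers above are the
 * migratetype accessors in pageblock-flags.h, e.g. (paraphrased):
 *
 *      #define get_pageblock_migratetype(page) \
 *              get_pageblock_flags_group(page, PB_migrate, PB_migrate_end)
 *
 * which reads the migrate-type bit group for the pageblock containing
 * @page; set_pageblock_migratetype() writes the same group via
 * set_pageblock_flags_group().
 */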
/*
 * This is designed as a helper function; please see page_isolation.c
 * as well.  It sets a pageblock's migrate type to ISOLATE.  The page
 * allocator never allocates memory from an ISOLATE block.
 */
int set_migratetype_isolate(struct page *page)
{
        struct zone *zone;
        unsigned long flags;
        int ret = -EBUSY;
        int zone_idx;

        zone = page_zone(page);
        zone_idx = zone_idx(zone);
        spin_lock_irqsave(&zone->lock, flags);
        /*
         * In the future, more migrate types will be able to be
         * isolation targets.
         */
        if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE &&
            zone_idx != ZONE_MOVABLE)
                goto out;
        set_pageblock_migratetype(page, MIGRATE_ISOLATE);
        move_freepages_block(zone, page, MIGRATE_ISOLATE);
        ret = 0;
out:
        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages();
        return ret;
}
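/*
 * Editor's note: an illustrative sketch (paraphrasing the caller
 * pattern used by page_isolation.c, not the original code) of how a
 * pfn range is isolated one pageblock at a time, unwinding on failure.
 */
#if 0
static int isolate_range_sketch(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        /* Walk the range one pageblock at a time. */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                if (set_migratetype_isolate(pfn_to_page(pfn)))
                        goto undo;
        }
        return 0;
undo:
        /* Roll back the pageblocks isolated so far. */
        while (pfn > start_pfn) {
                pfn -= pageblock_nr_pages;
                unset_migratetype_isolate(pfn_to_page(pfn));
        }
        return -EBUSY;
}
#endif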
void unset_migratetype_isolate(struct page *page)
{
        struct zone *zone;
        unsigned long flags;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;
        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
        move_freepages_block(zone, page, MIGRATE_MOVABLE);
out:
        spin_unlock_irqrestore(&zone->lock, flags);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *page;
        struct zone *zone;
        int order, i;
        unsigned long pfn;
        unsigned long flags;

        /* find the first valid pfn */
        for (pfn = start_pfn; pfn < end_pfn; pfn++)
                if (pfn_valid(pfn))
                        break;
        if (pfn == end_pfn)
                return;
        zone = page_zone(pfn_to_page(pfn));
        spin_lock_irqsave(&zone->lock, flags);
        pfn = start_pfn;
        while (pfn < end_pfn) {
                if (!pfn_valid(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                BUG_ON(page_count(page));
                BUG_ON(!PageBuddy(page));
                order = page_order(page);
#ifdef CONFIG_DEBUG_VM
                printk(KERN_INFO "remove from free list %lx %d %lx\n",
                       pfn, 1 << order, end_pfn);
#endif
                list_del(&page->lru);
                rmv_page_order(page);
                zone->free_area[order].nr_free--;
                __mod_zone_page_state(zone, NR_FREE_PAGES,
                                      -(1UL << order));
                for (i = 0; i < (1 << order); i++)
                        SetPageReserved((page+i));
                pfn += (1 << order);
        }
        spin_unlock_irqrestore(&zone->lock, flags);
}
#endif
#ifdef CONFIG_MEMORY_FAILURE
bool is_free_buddy_page(struct page *page)
{
        struct zone *zone = page_zone(page);
        unsigned long pfn = page_to_pfn(page);
        unsigned long flags;
        int order;

        spin_lock_irqsave(&zone->lock, flags);
        for (order = 0; order < MAX_ORDER; order++) {
                struct page *page_head = page - (pfn & ((1 << order) - 1));

                if (PageBuddy(page_head) && page_order(page_head) >= order)
                        break;
        }
        spin_unlock_irqrestore(&zone->lock, flags);

        return order < MAX_ORDER;
}
#endif
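/*
 * Editor's note: a worked example of the buddy-head computation above.
 * For pfn = 1027 and order = 2, pfn & ((1 << 2) - 1) = 3, so page_head
 * is the page at pfn 1024; if that page is PageBuddy with order >= 2,
 * the queried page lies inside a free buddy chunk and the loop breaks
 * with order < MAX_ORDER.
 */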