/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list; the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
int percpu_pagelist_fraction;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};
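/*
 * Rough illustration of the ratios above (approximate, based on how
 * setup_per_zone_lowmem_reserve() divides the pages of the higher zones
 * by the lower zone's ratio): on the 1G example machine, a HIGHMEM
 * allocation may dip into ZONE_NORMAL only while roughly 224M/32 = 7M of
 * NORMAL remains free, and into ZONE_DMA only while roughly
 * (224M+784M)/256 = ~4M of DMA remains free.
 */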
EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);
#endif

int page_group_by_mobility_disabled __read_mostly;
static void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	printk(KERN_ALERT
		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
		page, (void *)page->flags, page_count(page),
		page_mapcount(page), page->mapping, page->index);

	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	__ClearPageBuddy(page);
	add_taint(TAINT_BAD_PAGE);
}
/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set. All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function. Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}
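/*
 * For example, an order-2 compound allocation spans four struct pages:
 * page[0] becomes the head page, page[1..3] become tail pages whose
 * ->first_page points back at page[0], and (per the comment above) the
 * first tail page also carries the destructor in ->lru.next and the
 * order in ->lru.prev.
 */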
#ifdef CONFIG_HUGETLBFS
void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		p->first_page = page;
	}
}
#endif

static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order) ||
	    unlikely(!PageHead(page))) {
		bad_page(page);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) || (p->first_page != page))) {
			bad_page(page);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}
/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
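/*
 * Worked example of the two helpers above: for page_idx = 12 at order 2,
 * the buddy index is 12 ^ (1 << 2) = 8, so __page_find_buddy() returns the
 * struct page four entries earlier, and __find_combined_index() yields
 * 12 & ~(1 << 2) = 8, the index of the merged order-3 block.
 */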
/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page with. A page and its buddy can be coalesced if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PG_buddy. The page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;

		/* Our buddy is free, merge with it and move up one order. */
		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru,
		&zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}
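/*
 * Merge example for __free_one_page(): freeing the order-0 page at
 * page_idx 8 while page 9 is already free merges them into an order-1
 * block at index 8; if the order-1 block at index 10 is also free, the
 * loop runs once more and an order-2 block at index 8 goes onto the
 * free list. The loop stops as soon as page_is_buddy() fails or
 * MAX_ORDER-1 is reached.
 */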
static inline int free_pages_check(struct page *page)
{
	free_page_mlock(page);
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
		bad_page(page);
		return 1;
	}
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order, page_private(page));
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int bad = 0;

	for (i = 0 ; i < (1 << order) ; ++i)
		bad += free_pages_check(page + i);
	if (bad)
		return;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order,
					get_pageblock_migratetype(page));
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}
/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
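/*
 * For example, satisfying an order-1 request from an order-4 block:
 * expand() is entered with low = 1 and high = 4, and each pass returns the
 * upper half of the remaining block to its free list, so an order-3, an
 * order-2 and an order-1 remainder are queued while the lowest two pages
 * are handed to the caller.
 */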
/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
		bad_page(page);
		return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area * area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}
/*
 * This array describes the order in which free lists are fallen back on
 * when the free lists for the desired migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};
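/*
 * Read row by row: a MIGRATE_UNMOVABLE request that finds its own lists
 * empty tries MIGRATE_RECLAIMABLE, then MIGRATE_MOVABLE, and only then the
 * MIGRATE_RESERVE pool (which __rmqueue() actually reaches through its own
 * retry path rather than through this table).
 */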
/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_del(&page->lru);
		list_add(&page->lru,
			&zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

static int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}
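/*
 * Alignment example for move_freepages_block(): with, say,
 * pageblock_nr_pages == 512, a page at pfn 1300 is rounded down to the
 * block starting at pfn 1280, so the range [1280, 1791] is handed to
 * move_freepages(), unless either end of that block falls outside the zone.
 */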
/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
	struct free_area * area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= (pageblock_order >> 1)) ||
					start_migratetype == MIGRATE_RECLAIMABLE) {
				unsigned long pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)))
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);
			__mod_zone_page_state(zone, NR_FREE_PAGES,
							-(1UL << order));

			if (current_order == pageblock_order)
				set_pageblock_migratetype(page,
							start_migratetype);

			expand(zone, page, order, current_order, area, migratetype);
			return page;
		}
	}

	return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

retry_reserve:
	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		page = __rmqueue_fallback(zone, order, migratetype);

		/*
		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
		 * is used because __rmqueue_smallest is an inline function
		 * and we want just one call site
		 */
		if (!page) {
			migratetype = MIGRATE_RESERVE;
			goto retry_reserve;
		}
	}

	return page;
}
/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency. Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the caller's
		 * perspective, the linked list is ordered by page number in
		 * some conditions. This is useful for IO devices that can
		 * merge IO requests if the physical pages are ordered
		 * properly.
		 */
		list_add(&page->lru, list);
		set_page_private(page, migratetype);
		list = &page->lru;
	}
	spin_unlock(&zone->lock);
	return i;
}
#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	free_pages_bulk(zone, to_drain, &pcp->list, 0);
	pcp->count -= to_drain;
	local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		pset = zone_pcp(zone, cpu);
		pcp = &pset->pcp;

		local_irq_save(flags);
		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
		pcp->count = 0;
		local_irq_restore(flags);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
	on_each_cpu(drain_local_pages, NULL, 1);
}
#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */
/*
 * Free a 0-order page
 */
static void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
	}
	arch_free_page(page, 0);
	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp;
	local_irq_save(flags);
	__count_vm_event(PGFREE);
	if (cold)
		list_add_tail(&page->lru, &pcp->list);
	else
		list_add(&page->lru, &pcp->list);
	set_page_private(page, get_pageblock_migratetype(page));
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}
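/*
 * Per-cpu list behaviour, by way of example: hot frees go to the head of
 * pcp->list and cold frees to the tail, so later allocations preferentially
 * reuse cache-hot pages. If, say, pcp->high were 186 and pcp->batch 31,
 * the 186th free on a CPU would push a batch of 31 pages back to the buddy
 * lists (the actual values are sized per zone at boot).
 */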
void free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}

void free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
 * we cheat by calling it from here, in the order > 0 path. Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, int order, gfp_t gfp_flags,
			int migratetype)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp;
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count = rmqueue_bulk(zone, 0,
					pcp->batch, &pcp->list, migratetype);
			if (unlikely(!pcp->count))
				goto failed;
		}

		/* Find a page of the appropriate migrate type */
		if (cold) {
			list_for_each_entry_reverse(page, &pcp->list, lru)
				if (page_private(page) == migratetype)
					break;
		} else {
			list_for_each_entry(page, &pcp->list, lru)
				if (page_private(page) == migratetype)
					break;
		}

		/* Allocate more to the pcp list if necessary */
		if (unlikely(&page->lru == &pcp->list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, &pcp->list, migratetype);
			page = list_entry(pcp->list.next, struct page, lru);
		}

		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}
#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

	struct dentry *ignore_gfp_highmem_file;
	struct dentry *ignore_gfp_wait_file;
	struct dentry *min_order_file;

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);
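/*
 * The boot parameter is parsed by setup_fault_attr() and is expected to
 * take the usual fault-injection form, e.g.
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 * (see Documentation/fault-injection/fault-injection.txt).
 */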
static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return 0;
	if (gfp_mask & __GFP_NOFAIL)
		return 0;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return 0;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return 0;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
				       "fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				      &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				      &fail_page_alloc.ignore_gfp_highmem);
	fail_page_alloc.min_order_file =
		debugfs_create_u32("min-order", mode, dir,
				   &fail_page_alloc.min_order);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
			!fail_page_alloc.ignore_gfp_highmem_file ||
			!fail_page_alloc.min_order_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		debugfs_remove(fail_page_alloc.min_order_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */
  1074. /*
  1075. * Return 1 if free pages are above 'mark'. This takes into account the order
  1076. * of the allocation.
  1077. */
  1078. int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
  1079. int classzone_idx, int alloc_flags)
  1080. {
1081. /* free_pages may go negative - that's OK */
  1082. long min = mark;
  1083. long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
  1084. int o;
  1085. if (alloc_flags & ALLOC_HIGH)
  1086. min -= min / 2;
  1087. if (alloc_flags & ALLOC_HARDER)
  1088. min -= min / 4;
  1089. if (free_pages <= min + z->lowmem_reserve[classzone_idx])
  1090. return 0;
  1091. for (o = 0; o < order; o++) {
  1092. /* At the next order, this order's pages become unavailable */
  1093. free_pages -= z->free_area[o].nr_free << o;
  1094. /* Require fewer higher order pages to be free */
  1095. min >>= 1;
  1096. if (free_pages <= min)
  1097. return 0;
  1098. }
  1099. return 1;
  1100. }
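/*
 * Worked example (illustrative numbers only): for an order-2 request with
 * mark = 128, no lowmem_reserve and no ALLOC_HIGH/ALLOC_HARDER, a zone
 * holding 200 free pages that are all order-0 starts with
 * free_pages = 200 - 4 + 1 = 197 > 128, but at o = 0 the loop subtracts
 * all 200 order-0 pages and compares against min = 64, so the check
 * fails: plenty of memory, but none of it in blocks large enough for the
 * request.
 */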
  1101. #ifdef CONFIG_NUMA
  1102. /*
  1103. * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
  1104. * skip over zones that are not allowed by the cpuset, or that have
  1105. * been recently (in last second) found to be nearly full. See further
  1106. * comments in mmzone.h. Reduces cache footprint of zonelist scans
  1107. * that have to skip over a lot of full or unallowed zones.
  1108. *
  1109. * If the zonelist cache is present in the passed in zonelist, then
  1110. * returns a pointer to the allowed node mask (either the current
1111. * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
  1112. *
  1113. * If the zonelist cache is not available for this zonelist, does
  1114. * nothing and returns NULL.
  1115. *
  1116. * If the fullzones BITMAP in the zonelist cache is stale (more than
  1117. * a second since last zap'd) then we zap it out (clear its bits.)
  1118. *
  1119. * We hold off even calling zlc_setup, until after we've checked the
  1120. * first zone in the zonelist, on the theory that most allocations will
  1121. * be satisfied from that first zone, so best to examine that zone as
  1122. * quickly as we can.
  1123. */
  1124. static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
  1125. {
  1126. struct zonelist_cache *zlc; /* cached zonelist speedup info */
  1127. nodemask_t *allowednodes; /* zonelist_cache approximation */
  1128. zlc = zonelist->zlcache_ptr;
  1129. if (!zlc)
  1130. return NULL;
  1131. if (time_after(jiffies, zlc->last_full_zap + HZ)) {
  1132. bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
  1133. zlc->last_full_zap = jiffies;
  1134. }
  1135. allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
  1136. &cpuset_current_mems_allowed :
  1137. &node_states[N_HIGH_MEMORY];
  1138. return allowednodes;
  1139. }
  1140. /*
  1141. * Given 'z' scanning a zonelist, run a couple of quick checks to see
  1142. * if it is worth looking at further for free memory:
  1143. * 1) Check that the zone isn't thought to be full (doesn't have its
  1144. * bit set in the zonelist_cache fullzones BITMAP).
1145. * 2) Check that the zone's node (obtained from the zonelist_cache
  1146. * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
  1147. * Return true (non-zero) if zone is worth looking at further, or
  1148. * else return false (zero) if it is not.
  1149. *
  1150. * This check -ignores- the distinction between various watermarks,
  1151. * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
  1152. * found to be full for any variation of these watermarks, it will
  1153. * be considered full for up to one second by all requests, unless
  1154. * we are so low on memory on all allowed nodes that we are forced
  1155. * into the second scan of the zonelist.
  1156. *
  1157. * In the second scan we ignore this zonelist cache and exactly
1158. * apply the watermarks to all zones, even if it is slower to do so.
  1159. * We are low on memory in the second scan, and should leave no stone
  1160. * unturned looking for a free page.
  1161. */
  1162. static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
  1163. nodemask_t *allowednodes)
  1164. {
  1165. struct zonelist_cache *zlc; /* cached zonelist speedup info */
  1166. int i; /* index of *z in zonelist zones */
  1167. int n; /* node that zone *z is on */
  1168. zlc = zonelist->zlcache_ptr;
  1169. if (!zlc)
  1170. return 1;
  1171. i = z - zonelist->_zonerefs;
  1172. n = zlc->z_to_n[i];
  1173. /* This zone is worth trying if it is allowed but not full */
  1174. return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
  1175. }
  1176. /*
  1177. * Given 'z' scanning a zonelist, set the corresponding bit in
  1178. * zlc->fullzones, so that subsequent attempts to allocate a page
  1179. * from that zone don't waste time re-examining it.
  1180. */
  1181. static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
  1182. {
  1183. struct zonelist_cache *zlc; /* cached zonelist speedup info */
  1184. int i; /* index of *z in zonelist zones */
  1185. zlc = zonelist->zlcache_ptr;
  1186. if (!zlc)
  1187. return;
  1188. i = z - zonelist->_zonerefs;
  1189. set_bit(i, zlc->fullzones);
  1190. }
  1191. #else /* CONFIG_NUMA */
  1192. static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
  1193. {
  1194. return NULL;
  1195. }
  1196. static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
  1197. nodemask_t *allowednodes)
  1198. {
  1199. return 1;
  1200. }
  1201. static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
  1202. {
  1203. }
  1204. #endif /* CONFIG_NUMA */
  1205. /*
  1206. * get_page_from_freelist goes through the zonelist trying to allocate
  1207. * a page.
  1208. */
  1209. static struct page *
  1210. get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
  1211. struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
  1212. struct zone *preferred_zone, int migratetype)
  1213. {
  1214. struct zoneref *z;
  1215. struct page *page = NULL;
  1216. int classzone_idx;
  1217. struct zone *zone;
  1218. nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
  1219. int zlc_active = 0; /* set if using zonelist_cache */
  1220. int did_zlc_setup = 0; /* just call zlc_setup() one time */
  1221. if (WARN_ON_ONCE(order >= MAX_ORDER))
  1222. return NULL;
  1223. classzone_idx = zone_idx(preferred_zone);
  1224. zonelist_scan:
  1225. /*
  1226. * Scan zonelist, looking for a zone with enough free.
  1227. * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
  1228. */
  1229. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  1230. high_zoneidx, nodemask) {
  1231. if (NUMA_BUILD && zlc_active &&
  1232. !zlc_zone_worth_trying(zonelist, z, allowednodes))
  1233. continue;
  1234. if ((alloc_flags & ALLOC_CPUSET) &&
  1235. !cpuset_zone_allowed_softwall(zone, gfp_mask))
  1236. goto try_next_zone;
  1237. if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
  1238. unsigned long mark;
  1239. if (alloc_flags & ALLOC_WMARK_MIN)
  1240. mark = zone->pages_min;
  1241. else if (alloc_flags & ALLOC_WMARK_LOW)
  1242. mark = zone->pages_low;
  1243. else
  1244. mark = zone->pages_high;
  1245. if (!zone_watermark_ok(zone, order, mark,
  1246. classzone_idx, alloc_flags)) {
  1247. if (!zone_reclaim_mode ||
  1248. !zone_reclaim(zone, gfp_mask, order))
  1249. goto this_zone_full;
  1250. }
  1251. }
  1252. page = buffered_rmqueue(preferred_zone, zone, order,
  1253. gfp_mask, migratetype);
  1254. if (page)
  1255. break;
  1256. this_zone_full:
  1257. if (NUMA_BUILD)
  1258. zlc_mark_zone_full(zonelist, z);
  1259. try_next_zone:
  1260. if (NUMA_BUILD && !did_zlc_setup) {
  1261. /* we do zlc_setup after the first zone is tried */
  1262. allowednodes = zlc_setup(zonelist, alloc_flags);
  1263. zlc_active = 1;
  1264. did_zlc_setup = 1;
  1265. }
  1266. }
  1267. if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
  1268. /* Disable zlc cache for second zonelist scan */
  1269. zlc_active = 0;
  1270. goto zonelist_scan;
  1271. }
  1272. return page;
  1273. }
  1274. static inline int
  1275. should_alloc_retry(gfp_t gfp_mask, unsigned int order,
  1276. unsigned long pages_reclaimed)
  1277. {
  1278. /* Do not loop if specifically requested */
  1279. if (gfp_mask & __GFP_NORETRY)
  1280. return 0;
  1281. /*
  1282. * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
  1283. * means __GFP_NOFAIL, but that may not be true in other
  1284. * implementations.
  1285. */
  1286. if (order <= PAGE_ALLOC_COSTLY_ORDER)
  1287. return 1;
  1288. /*
  1289. * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
  1290. * specified, then we retry until we no longer reclaim any pages
  1291. * (above), or we've reclaimed an order of pages at least as
  1292. * large as the allocation's order. In both cases, if the
  1293. * allocation still fails, we stop retrying.
  1294. */
  1295. if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
  1296. return 1;
  1297. /*
  1298. * Don't let big-order allocations loop unless the caller
  1299. * explicitly requests that.
  1300. */
  1301. if (gfp_mask & __GFP_NOFAIL)
  1302. return 1;
  1303. return 0;
  1304. }
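/*
 * Illustrative example: with PAGE_ALLOC_COSTLY_ORDER == 3, an order-4
 * request carrying __GFP_REPEAT keeps retrying until at least
 * 1 << 4 = 16 pages have been reclaimed, after which it gives up unless
 * __GFP_NOFAIL is also set.
 */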
  1305. static inline struct page *
  1306. __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
  1307. struct zonelist *zonelist, enum zone_type high_zoneidx,
  1308. nodemask_t *nodemask, struct zone *preferred_zone,
  1309. int migratetype)
  1310. {
  1311. struct page *page;
  1312. /* Acquire the OOM killer lock for the zones in zonelist */
  1313. if (!try_set_zone_oom(zonelist, gfp_mask)) {
  1314. schedule_timeout_uninterruptible(1);
  1315. return NULL;
  1316. }
  1317. /*
  1318. * Go through the zonelist yet one more time, keep very high watermark
  1319. * here, this is only to catch a parallel oom killing, we must fail if
  1320. * we're still under heavy pressure.
  1321. */
  1322. page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
  1323. order, zonelist, high_zoneidx,
  1324. ALLOC_WMARK_HIGH|ALLOC_CPUSET,
  1325. preferred_zone, migratetype);
  1326. if (page)
  1327. goto out;
  1328. /* The OOM killer will not help higher order allocs */
  1329. if (order > PAGE_ALLOC_COSTLY_ORDER)
  1330. goto out;
  1331. /* Exhausted what can be done so it's blamo time */
  1332. out_of_memory(zonelist, gfp_mask, order);
  1333. out:
  1334. clear_zonelist_oom(zonelist, gfp_mask);
  1335. return page;
  1336. }
  1337. /* The really slow allocator path where we enter direct reclaim */
  1338. static inline struct page *
  1339. __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
  1340. struct zonelist *zonelist, enum zone_type high_zoneidx,
  1341. nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
  1342. int migratetype, unsigned long *did_some_progress)
  1343. {
  1344. struct page *page = NULL;
  1345. struct reclaim_state reclaim_state;
  1346. struct task_struct *p = current;
  1347. cond_resched();
  1348. /* We now go into synchronous reclaim */
  1349. cpuset_memory_pressure_bump();
  1350. /*
  1351. * The task's cpuset might have expanded its set of allowable nodes
  1352. */
  1353. p->flags |= PF_MEMALLOC;
  1354. lockdep_set_current_reclaim_state(gfp_mask);
  1355. reclaim_state.reclaimed_slab = 0;
  1356. p->reclaim_state = &reclaim_state;
  1357. *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
  1358. p->reclaim_state = NULL;
  1359. lockdep_clear_current_reclaim_state();
  1360. p->flags &= ~PF_MEMALLOC;
  1361. cond_resched();
  1362. if (order != 0)
  1363. drain_all_pages();
  1364. if (likely(*did_some_progress))
  1365. page = get_page_from_freelist(gfp_mask, nodemask, order,
  1366. zonelist, high_zoneidx,
  1367. alloc_flags, preferred_zone,
  1368. migratetype);
  1369. return page;
  1370. }
  1371. /*
  1372. * This is called in the allocator slow-path if the allocation request is of
  1373. * sufficient urgency to ignore watermarks and take other desperate measures
  1374. */
  1375. static inline struct page *
  1376. __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
  1377. struct zonelist *zonelist, enum zone_type high_zoneidx,
  1378. nodemask_t *nodemask, struct zone *preferred_zone,
  1379. int migratetype)
  1380. {
  1381. struct page *page;
  1382. do {
  1383. page = get_page_from_freelist(gfp_mask, nodemask, order,
  1384. zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
  1385. preferred_zone, migratetype);
  1386. if (!page && gfp_mask & __GFP_NOFAIL)
  1387. congestion_wait(WRITE, HZ/50);
  1388. } while (!page && (gfp_mask & __GFP_NOFAIL));
  1389. return page;
  1390. }
  1391. static inline
  1392. void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
  1393. enum zone_type high_zoneidx)
  1394. {
  1395. struct zoneref *z;
  1396. struct zone *zone;
  1397. for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
  1398. wakeup_kswapd(zone, order);
  1399. }
  1400. static inline int
  1401. gfp_to_alloc_flags(gfp_t gfp_mask)
  1402. {
  1403. struct task_struct *p = current;
  1404. int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
  1405. const gfp_t wait = gfp_mask & __GFP_WAIT;
  1406. /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
  1407. BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
  1408. /*
  1409. * The caller may dip into page reserves a bit more if the caller
  1410. * cannot run direct reclaim, or if the caller has realtime scheduling
  1411. * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
  1412. * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
  1413. */
  1414. alloc_flags |= (gfp_mask & __GFP_HIGH);
  1415. if (!wait) {
  1416. alloc_flags |= ALLOC_HARDER;
  1417. /*
  1418. * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
  1419. * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
  1420. */
  1421. alloc_flags &= ~ALLOC_CPUSET;
  1422. } else if (unlikely(rt_task(p)))
  1423. alloc_flags |= ALLOC_HARDER;
  1424. if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
  1425. if (!in_interrupt() &&
  1426. ((p->flags & PF_MEMALLOC) ||
  1427. unlikely(test_thread_flag(TIF_MEMDIE))))
  1428. alloc_flags |= ALLOC_NO_WATERMARKS;
  1429. }
  1430. return alloc_flags;
  1431. }
  1432. static inline struct page *
  1433. __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
  1434. struct zonelist *zonelist, enum zone_type high_zoneidx,
  1435. nodemask_t *nodemask, struct zone *preferred_zone,
  1436. int migratetype)
  1437. {
  1438. const gfp_t wait = gfp_mask & __GFP_WAIT;
  1439. struct page *page = NULL;
  1440. int alloc_flags;
  1441. unsigned long pages_reclaimed = 0;
  1442. unsigned long did_some_progress;
  1443. struct task_struct *p = current;
  1444. /*
  1445. * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
  1446. * __GFP_NOWARN set) should not cause reclaim since the subsystem
  1447. * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
  1448. * using a larger set of nodes after it has established that the
  1449. * allowed per node queues are empty and that nodes are
  1450. * over allocated.
  1451. */
  1452. if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
  1453. goto nopage;
  1454. wake_all_kswapd(order, zonelist, high_zoneidx);
  1455. /*
  1456. * OK, we're below the kswapd watermark and have kicked background
  1457. * reclaim. Now things get more complex, so set up alloc_flags according
  1458. * to how we want to proceed.
  1459. */
  1460. alloc_flags = gfp_to_alloc_flags(gfp_mask);
  1461. restart:
  1462. /* This is the last chance, in general, before the goto nopage. */
  1463. page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
  1464. high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
  1465. preferred_zone, migratetype);
  1466. if (page)
  1467. goto got_pg;
  1468. rebalance:
  1469. /* Allocate without watermarks if the context allows */
  1470. if (alloc_flags & ALLOC_NO_WATERMARKS) {
  1471. page = __alloc_pages_high_priority(gfp_mask, order,
  1472. zonelist, high_zoneidx, nodemask,
  1473. preferred_zone, migratetype);
  1474. if (page)
  1475. goto got_pg;
  1476. }
  1477. /* Atomic allocations - we can't balance anything */
  1478. if (!wait)
  1479. goto nopage;
  1480. /* Avoid recursion of direct reclaim */
  1481. if (p->flags & PF_MEMALLOC)
  1482. goto nopage;
  1483. /* Try direct reclaim and then allocating */
  1484. page = __alloc_pages_direct_reclaim(gfp_mask, order,
  1485. zonelist, high_zoneidx,
  1486. nodemask,
  1487. alloc_flags, preferred_zone,
  1488. migratetype, &did_some_progress);
  1489. if (page)
  1490. goto got_pg;
  1491. /*
  1492. * If we failed to make any progress reclaiming, then we are
  1493. * running out of options and have to consider going OOM
  1494. */
  1495. if (!did_some_progress) {
  1496. if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
  1497. page = __alloc_pages_may_oom(gfp_mask, order,
  1498. zonelist, high_zoneidx,
  1499. nodemask, preferred_zone,
  1500. migratetype);
  1501. if (page)
  1502. goto got_pg;
  1503. /*
  1504. * The OOM killer does not trigger for high-order allocations
  1505. * but if no progress is being made, there are no other
  1506. * options and retrying is unlikely to help
  1507. */
  1508. if (order > PAGE_ALLOC_COSTLY_ORDER)
  1509. goto nopage;
  1510. goto restart;
  1511. }
  1512. }
  1513. /* Check if we should retry the allocation */
  1514. pages_reclaimed += did_some_progress;
  1515. if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
  1516. /* Wait for some write requests to complete then retry */
  1517. congestion_wait(WRITE, HZ/50);
  1518. goto rebalance;
  1519. }
  1520. nopage:
  1521. if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
  1522. printk(KERN_WARNING "%s: page allocation failure."
  1523. " order:%d, mode:0x%x\n",
  1524. p->comm, order, gfp_mask);
  1525. dump_stack();
  1526. show_mem();
  1527. }
  1528. got_pg:
  1529. return page;
  1530. }
  1531. /*
  1532. * This is the 'heart' of the zoned buddy allocator.
  1533. */
  1534. struct page *
  1535. __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
  1536. struct zonelist *zonelist, nodemask_t *nodemask)
  1537. {
  1538. enum zone_type high_zoneidx = gfp_zone(gfp_mask);
  1539. struct zone *preferred_zone;
  1540. struct page *page;
  1541. int migratetype = allocflags_to_migratetype(gfp_mask);
  1542. lockdep_trace_alloc(gfp_mask);
  1543. might_sleep_if(gfp_mask & __GFP_WAIT);
  1544. if (should_fail_alloc_page(gfp_mask, order))
  1545. return NULL;
  1546. /*
  1547. * Check the zones suitable for the gfp_mask contain at least one
  1548. * valid zone. It's possible to have an empty zonelist as a result
  1549. * of GFP_THISNODE and a memoryless node
  1550. */
  1551. if (unlikely(!zonelist->_zonerefs->zone))
  1552. return NULL;
  1553. /* The preferred zone is used for statistics later */
  1554. first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
  1555. if (!preferred_zone)
  1556. return NULL;
  1557. /* First allocation attempt */
  1558. page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
  1559. zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
  1560. preferred_zone, migratetype);
  1561. if (unlikely(!page))
  1562. page = __alloc_pages_slowpath(gfp_mask, order,
  1563. zonelist, high_zoneidx, nodemask,
  1564. preferred_zone, migratetype);
  1565. return page;
  1566. }
  1567. EXPORT_SYMBOL(__alloc_pages_nodemask);
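/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * callers normally reach this entry point through wrappers such as
 * alloc_pages(); the hypothetical helper below shows the struct-page
 * based pairing with __free_pages().
 */
#if 0
static void example_struct_page_pair(void)
{
	/* a two-page (order-1) block; GFP_KERNEL may sleep */
	struct page *page = alloc_pages(GFP_KERNEL, 1);

	if (page) {
		/* ... use page_address(page) ... */
		__free_pages(page, 1);
	}
}
#endif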
  1568. /*
  1569. * Common helper functions.
  1570. */
  1571. unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
  1572. {
  1573. struct page * page;
  1574. page = alloc_pages(gfp_mask, order);
  1575. if (!page)
  1576. return 0;
  1577. return (unsigned long) page_address(page);
  1578. }
  1579. EXPORT_SYMBOL(__get_free_pages);
  1580. unsigned long get_zeroed_page(gfp_t gfp_mask)
  1581. {
  1582. struct page * page;
  1583. /*
  1584. * get_zeroed_page() returns a 32-bit address, which cannot represent
  1585. * a highmem page
  1586. */
  1587. VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
  1588. page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
  1589. if (page)
  1590. return (unsigned long) page_address(page);
  1591. return 0;
  1592. }
  1593. EXPORT_SYMBOL(get_zeroed_page);
  1594. void __pagevec_free(struct pagevec *pvec)
  1595. {
  1596. int i = pagevec_count(pvec);
  1597. while (--i >= 0)
  1598. free_hot_cold_page(pvec->pages[i], pvec->cold);
  1599. }
  1600. void __free_pages(struct page *page, unsigned int order)
  1601. {
  1602. if (put_page_testzero(page)) {
  1603. if (order == 0)
  1604. free_hot_page(page);
  1605. else
  1606. __free_pages_ok(page, order);
  1607. }
  1608. }
  1609. EXPORT_SYMBOL(__free_pages);
  1610. void free_pages(unsigned long addr, unsigned int order)
  1611. {
  1612. if (addr != 0) {
  1613. VM_BUG_ON(!virt_addr_valid((void *)addr));
  1614. __free_pages(virt_to_page((void *)addr), order);
  1615. }
  1616. }
  1617. EXPORT_SYMBOL(free_pages);
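/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * __get_free_pages() and free_pages() work on kernel virtual addresses
 * rather than struct page pointers.
 */
#if 0
static int example_address_pair(void)
{
	unsigned long addr = __get_free_pages(GFP_KERNEL, 2); /* 4 pages */

	if (!addr)
		return -ENOMEM;
	/* ... use the 4-page buffer at addr ... */
	free_pages(addr, 2);
	return 0;
}
#endif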
  1618. /**
1619. * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
  1620. * @size: the number of bytes to allocate
  1621. * @gfp_mask: GFP flags for the allocation
  1622. *
  1623. * This function is similar to alloc_pages(), except that it allocates the
  1624. * minimum number of pages to satisfy the request. alloc_pages() can only
  1625. * allocate memory in power-of-two pages.
  1626. *
  1627. * This function is also limited by MAX_ORDER.
  1628. *
  1629. * Memory allocated by this function must be released by free_pages_exact().
  1630. */
  1631. void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
  1632. {
  1633. unsigned int order = get_order(size);
  1634. unsigned long addr;
  1635. addr = __get_free_pages(gfp_mask, order);
  1636. if (addr) {
  1637. unsigned long alloc_end = addr + (PAGE_SIZE << order);
  1638. unsigned long used = addr + PAGE_ALIGN(size);
  1639. split_page(virt_to_page(addr), order);
  1640. while (used < alloc_end) {
  1641. free_page(used);
  1642. used += PAGE_SIZE;
  1643. }
  1644. }
  1645. return (void *)addr;
  1646. }
  1647. EXPORT_SYMBOL(alloc_pages_exact);
  1648. /**
  1649. * free_pages_exact - release memory allocated via alloc_pages_exact()
  1650. * @virt: the value returned by alloc_pages_exact.
  1651. * @size: size of allocation, same value as passed to alloc_pages_exact().
  1652. *
  1653. * Release the memory allocated by a previous call to alloc_pages_exact.
  1654. */
  1655. void free_pages_exact(void *virt, size_t size)
  1656. {
  1657. unsigned long addr = (unsigned long)virt;
  1658. unsigned long end = addr + PAGE_ALIGN(size);
  1659. while (addr < end) {
  1660. free_page(addr);
  1661. addr += PAGE_SIZE;
  1662. }
  1663. }
  1664. EXPORT_SYMBOL(free_pages_exact);
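/*
 * Usage sketch (illustrative only, kept under #if 0 so it is not built):
 * alloc_pages_exact()/free_pages_exact() trim the power-of-two
 * allocation down to the requested size.
 */
#if 0
static int example_alloc_exact(void)
{
	size_t len = 10 * 1024;	/* 10KB -> 3 pages with 4KB pages */
	void *buf = alloc_pages_exact(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use the physically contiguous buffer ... */
	free_pages_exact(buf, len);
	return 0;
}
#endif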
  1665. static unsigned int nr_free_zone_pages(int offset)
  1666. {
  1667. struct zoneref *z;
  1668. struct zone *zone;
  1669. /* Just pick one node, since fallback list is circular */
  1670. unsigned int sum = 0;
  1671. struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
  1672. for_each_zone_zonelist(zone, z, zonelist, offset) {
  1673. unsigned long size = zone->present_pages;
  1674. unsigned long high = zone->pages_high;
  1675. if (size > high)
  1676. sum += size - high;
  1677. }
  1678. return sum;
  1679. }
  1680. /*
  1681. * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
  1682. */
  1683. unsigned int nr_free_buffer_pages(void)
  1684. {
  1685. return nr_free_zone_pages(gfp_zone(GFP_USER));
  1686. }
  1687. EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
  1688. /*
  1689. * Amount of free RAM allocatable within all zones
  1690. */
  1691. unsigned int nr_free_pagecache_pages(void)
  1692. {
  1693. return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
  1694. }
  1695. static inline void show_node(struct zone *zone)
  1696. {
  1697. if (NUMA_BUILD)
  1698. printk("Node %d ", zone_to_nid(zone));
  1699. }
  1700. void si_meminfo(struct sysinfo *val)
  1701. {
  1702. val->totalram = totalram_pages;
  1703. val->sharedram = 0;
  1704. val->freeram = global_page_state(NR_FREE_PAGES);
  1705. val->bufferram = nr_blockdev_pages();
  1706. val->totalhigh = totalhigh_pages;
  1707. val->freehigh = nr_free_highpages();
  1708. val->mem_unit = PAGE_SIZE;
  1709. }
  1710. EXPORT_SYMBOL(si_meminfo);
  1711. #ifdef CONFIG_NUMA
  1712. void si_meminfo_node(struct sysinfo *val, int nid)
  1713. {
  1714. pg_data_t *pgdat = NODE_DATA(nid);
  1715. val->totalram = pgdat->node_present_pages;
  1716. val->freeram = node_page_state(nid, NR_FREE_PAGES);
  1717. #ifdef CONFIG_HIGHMEM
  1718. val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
  1719. val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
  1720. NR_FREE_PAGES);
  1721. #else
  1722. val->totalhigh = 0;
  1723. val->freehigh = 0;
  1724. #endif
  1725. val->mem_unit = PAGE_SIZE;
  1726. }
  1727. #endif
  1728. #define K(x) ((x) << (PAGE_SHIFT-10))
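/* Illustrative: with 4KB pages (PAGE_SHIFT == 12), K(x) converts a page
 * count to kilobytes, e.g. K(3) == 12. */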
  1729. /*
  1730. * Show free area list (used inside shift_scroll-lock stuff)
  1731. * We also calculate the percentage fragmentation. We do this by counting the
  1732. * memory on each free list with the exception of the first item on the list.
  1733. */
  1734. void show_free_areas(void)
  1735. {
  1736. int cpu;
  1737. struct zone *zone;
  1738. for_each_populated_zone(zone) {
  1739. show_node(zone);
  1740. printk("%s per-cpu:\n", zone->name);
  1741. for_each_online_cpu(cpu) {
  1742. struct per_cpu_pageset *pageset;
  1743. pageset = zone_pcp(zone, cpu);
  1744. printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
  1745. cpu, pageset->pcp.high,
  1746. pageset->pcp.batch, pageset->pcp.count);
  1747. }
  1748. }
  1749. printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
  1750. " inactive_file:%lu"
  1751. //TODO: check/adjust line lengths
  1752. #ifdef CONFIG_UNEVICTABLE_LRU
  1753. " unevictable:%lu"
  1754. #endif
  1755. " dirty:%lu writeback:%lu unstable:%lu\n"
  1756. " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
  1757. global_page_state(NR_ACTIVE_ANON),
  1758. global_page_state(NR_ACTIVE_FILE),
  1759. global_page_state(NR_INACTIVE_ANON),
  1760. global_page_state(NR_INACTIVE_FILE),
  1761. #ifdef CONFIG_UNEVICTABLE_LRU
  1762. global_page_state(NR_UNEVICTABLE),
  1763. #endif
  1764. global_page_state(NR_FILE_DIRTY),
  1765. global_page_state(NR_WRITEBACK),
  1766. global_page_state(NR_UNSTABLE_NFS),
  1767. global_page_state(NR_FREE_PAGES),
  1768. global_page_state(NR_SLAB_RECLAIMABLE) +
  1769. global_page_state(NR_SLAB_UNRECLAIMABLE),
  1770. global_page_state(NR_FILE_MAPPED),
  1771. global_page_state(NR_PAGETABLE),
  1772. global_page_state(NR_BOUNCE));
  1773. for_each_populated_zone(zone) {
  1774. int i;
  1775. show_node(zone);
  1776. printk("%s"
  1777. " free:%lukB"
  1778. " min:%lukB"
  1779. " low:%lukB"
  1780. " high:%lukB"
  1781. " active_anon:%lukB"
  1782. " inactive_anon:%lukB"
  1783. " active_file:%lukB"
  1784. " inactive_file:%lukB"
  1785. #ifdef CONFIG_UNEVICTABLE_LRU
  1786. " unevictable:%lukB"
  1787. #endif
  1788. " present:%lukB"
  1789. " pages_scanned:%lu"
  1790. " all_unreclaimable? %s"
  1791. "\n",
  1792. zone->name,
  1793. K(zone_page_state(zone, NR_FREE_PAGES)),
  1794. K(zone->pages_min),
  1795. K(zone->pages_low),
  1796. K(zone->pages_high),
  1797. K(zone_page_state(zone, NR_ACTIVE_ANON)),
  1798. K(zone_page_state(zone, NR_INACTIVE_ANON)),
  1799. K(zone_page_state(zone, NR_ACTIVE_FILE)),
  1800. K(zone_page_state(zone, NR_INACTIVE_FILE)),
  1801. #ifdef CONFIG_UNEVICTABLE_LRU
  1802. K(zone_page_state(zone, NR_UNEVICTABLE)),
  1803. #endif
  1804. K(zone->present_pages),
  1805. zone->pages_scanned,
  1806. (zone_is_all_unreclaimable(zone) ? "yes" : "no")
  1807. );
  1808. printk("lowmem_reserve[]:");
  1809. for (i = 0; i < MAX_NR_ZONES; i++)
  1810. printk(" %lu", zone->lowmem_reserve[i]);
  1811. printk("\n");
  1812. }
  1813. for_each_populated_zone(zone) {
  1814. unsigned long nr[MAX_ORDER], flags, order, total = 0;
  1815. show_node(zone);
  1816. printk("%s: ", zone->name);
  1817. spin_lock_irqsave(&zone->lock, flags);
  1818. for (order = 0; order < MAX_ORDER; order++) {
  1819. nr[order] = zone->free_area[order].nr_free;
  1820. total += nr[order] << order;
  1821. }
  1822. spin_unlock_irqrestore(&zone->lock, flags);
  1823. for (order = 0; order < MAX_ORDER; order++)
  1824. printk("%lu*%lukB ", nr[order], K(1UL) << order);
  1825. printk("= %lukB\n", K(total));
  1826. }
  1827. printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
  1828. show_swap_cache_info();
  1829. }
  1830. static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
  1831. {
  1832. zoneref->zone = zone;
  1833. zoneref->zone_idx = zone_idx(zone);
  1834. }
  1835. /*
  1836. * Builds allocation fallback zone lists.
  1837. *
  1838. * Add all populated zones of a node to the zonelist.
  1839. */
  1840. static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
  1841. int nr_zones, enum zone_type zone_type)
  1842. {
  1843. struct zone *zone;
  1844. BUG_ON(zone_type >= MAX_NR_ZONES);
  1845. zone_type++;
  1846. do {
  1847. zone_type--;
  1848. zone = pgdat->node_zones + zone_type;
  1849. if (populated_zone(zone)) {
  1850. zoneref_set_zone(zone,
  1851. &zonelist->_zonerefs[nr_zones++]);
  1852. check_highest_zone(zone_type);
  1853. }
  1854. } while (zone_type);
  1855. return nr_zones;
  1856. }
  1857. /*
  1858. * zonelist_order:
  1859. * 0 = automatic detection of better ordering.
  1860. * 1 = order by ([node] distance, -zonetype)
  1861. * 2 = order by (-zonetype, [node] distance)
  1862. *
  1863. * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
  1864. * the same zonelist. So only NUMA can configure this param.
  1865. */
  1866. #define ZONELIST_ORDER_DEFAULT 0
  1867. #define ZONELIST_ORDER_NODE 1
  1868. #define ZONELIST_ORDER_ZONE 2
  1869. /* zonelist order in the kernel.
  1870. * set_zonelist_order() will set this to NODE or ZONE.
  1871. */
  1872. static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
  1873. static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
  1874. #ifdef CONFIG_NUMA
1875. /* The value the user specified, possibly changed by config */
  1876. static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
  1877. /* string for sysctl */
  1878. #define NUMA_ZONELIST_ORDER_LEN 16
  1879. char numa_zonelist_order[16] = "default";
  1880. /*
1881. * interface to configure zonelist ordering.
1882. * command line option "numa_zonelist_order"
1883. * = "[dD]efault" - default, automatic configuration.
1884. * = "[nN]ode" - order by node locality, then by zone within node
1885. * = "[zZ]one" - order by zone, then by locality within zone
  1886. */
  1887. static int __parse_numa_zonelist_order(char *s)
  1888. {
  1889. if (*s == 'd' || *s == 'D') {
  1890. user_zonelist_order = ZONELIST_ORDER_DEFAULT;
  1891. } else if (*s == 'n' || *s == 'N') {
  1892. user_zonelist_order = ZONELIST_ORDER_NODE;
  1893. } else if (*s == 'z' || *s == 'Z') {
  1894. user_zonelist_order = ZONELIST_ORDER_ZONE;
  1895. } else {
  1896. printk(KERN_WARNING
  1897. "Ignoring invalid numa_zonelist_order value: "
  1898. "%s\n", s);
  1899. return -EINVAL;
  1900. }
  1901. return 0;
  1902. }
  1903. static __init int setup_numa_zonelist_order(char *s)
  1904. {
  1905. if (s)
  1906. return __parse_numa_zonelist_order(s);
  1907. return 0;
  1908. }
  1909. early_param("numa_zonelist_order", setup_numa_zonelist_order);
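/*
 * Example (illustrative): booting with "numa_zonelist_order=zone" forces
 * zone order and "numa_zonelist_order=node" forces node order; the same
 * strings can be written to /proc/sys/vm/numa_zonelist_order at run time
 * through the sysctl handler below.
 */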
  1910. /*
  1911. * sysctl handler for numa_zonelist_order
  1912. */
  1913. int numa_zonelist_order_handler(ctl_table *table, int write,
  1914. struct file *file, void __user *buffer, size_t *length,
  1915. loff_t *ppos)
  1916. {
  1917. char saved_string[NUMA_ZONELIST_ORDER_LEN];
  1918. int ret;
  1919. if (write)
  1920. strncpy(saved_string, (char*)table->data,
  1921. NUMA_ZONELIST_ORDER_LEN);
  1922. ret = proc_dostring(table, write, file, buffer, length, ppos);
  1923. if (ret)
  1924. return ret;
  1925. if (write) {
  1926. int oldval = user_zonelist_order;
  1927. if (__parse_numa_zonelist_order((char*)table->data)) {
  1928. /*
  1929. * bogus value. restore saved string
  1930. */
  1931. strncpy((char*)table->data, saved_string,
  1932. NUMA_ZONELIST_ORDER_LEN);
  1933. user_zonelist_order = oldval;
  1934. } else if (oldval != user_zonelist_order)
  1935. build_all_zonelists();
  1936. }
  1937. return 0;
  1938. }
  1939. #define MAX_NODE_LOAD (num_online_nodes())
  1940. static int node_load[MAX_NUMNODES];
  1941. /**
  1942. * find_next_best_node - find the next node that should appear in a given node's fallback list
  1943. * @node: node whose fallback list we're appending
  1944. * @used_node_mask: nodemask_t of already used nodes
  1945. *
  1946. * We use a number of factors to determine which is the next node that should
  1947. * appear on a given node's fallback list. The node should not have appeared
  1948. * already in @node's fallback list, and it should be the next closest node
  1949. * according to the distance array (which contains arbitrary distance values
  1950. * from each node to each node in the system), and should also prefer nodes
  1951. * with no CPUs, since presumably they'll have very little allocation pressure
  1952. * on them otherwise.
  1953. * It returns -1 if no node is found.
  1954. */
  1955. static int find_next_best_node(int node, nodemask_t *used_node_mask)
  1956. {
  1957. int n, val;
  1958. int min_val = INT_MAX;
  1959. int best_node = -1;
  1960. const struct cpumask *tmp = cpumask_of_node(0);
  1961. /* Use the local node if we haven't already */
  1962. if (!node_isset(node, *used_node_mask)) {
  1963. node_set(node, *used_node_mask);
  1964. return node;
  1965. }
  1966. for_each_node_state(n, N_HIGH_MEMORY) {
  1967. /* Don't want a node to appear more than once */
  1968. if (node_isset(n, *used_node_mask))
  1969. continue;
  1970. /* Use the distance array to find the distance */
  1971. val = node_distance(node, n);
  1972. /* Penalize nodes under us ("prefer the next node") */
  1973. val += (n < node);
  1974. /* Give preference to headless and unused nodes */
  1975. tmp = cpumask_of_node(n);
  1976. if (!cpumask_empty(tmp))
  1977. val += PENALTY_FOR_NODE_WITH_CPUS;
  1978. /* Slight preference for less loaded node */
  1979. val *= (MAX_NODE_LOAD*MAX_NUMNODES);
  1980. val += node_load[n];
  1981. if (val < min_val) {
  1982. min_val = val;
  1983. best_node = n;
  1984. }
  1985. }
  1986. if (best_node >= 0)
  1987. node_set(best_node, *used_node_mask);
  1988. return best_node;
  1989. }
  1990. /*
  1991. * Build zonelists ordered by node and zones within node.
  1992. * This results in maximum locality--normal zone overflows into local
  1993. * DMA zone, if any--but risks exhausting DMA zone.
  1994. */
  1995. static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
  1996. {
  1997. int j;
  1998. struct zonelist *zonelist;
  1999. zonelist = &pgdat->node_zonelists[0];
  2000. for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
  2001. ;
  2002. j = build_zonelists_node(NODE_DATA(node), zonelist, j,
  2003. MAX_NR_ZONES - 1);
  2004. zonelist->_zonerefs[j].zone = NULL;
  2005. zonelist->_zonerefs[j].zone_idx = 0;
  2006. }
  2007. /*
  2008. * Build gfp_thisnode zonelists
  2009. */
  2010. static void build_thisnode_zonelists(pg_data_t *pgdat)
  2011. {
  2012. int j;
  2013. struct zonelist *zonelist;
  2014. zonelist = &pgdat->node_zonelists[1];
  2015. j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
  2016. zonelist->_zonerefs[j].zone = NULL;
  2017. zonelist->_zonerefs[j].zone_idx = 0;
  2018. }
  2019. /*
  2020. * Build zonelists ordered by zone and nodes within zones.
  2021. * This results in conserving DMA zone[s] until all Normal memory is
2022. * exhausted, but risks overflowing to a remote node while memory
2023. * may still exist in the local DMA zone.
  2024. */
  2025. static int node_order[MAX_NUMNODES];
  2026. static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
  2027. {
  2028. int pos, j, node;
  2029. int zone_type; /* needs to be signed */
  2030. struct zone *z;
  2031. struct zonelist *zonelist;
  2032. zonelist = &pgdat->node_zonelists[0];
  2033. pos = 0;
  2034. for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
  2035. for (j = 0; j < nr_nodes; j++) {
  2036. node = node_order[j];
  2037. z = &NODE_DATA(node)->node_zones[zone_type];
  2038. if (populated_zone(z)) {
  2039. zoneref_set_zone(z,
  2040. &zonelist->_zonerefs[pos++]);
  2041. check_highest_zone(zone_type);
  2042. }
  2043. }
  2044. }
  2045. zonelist->_zonerefs[pos].zone = NULL;
  2046. zonelist->_zonerefs[pos].zone_idx = 0;
  2047. }
  2048. static int default_zonelist_order(void)
  2049. {
  2050. int nid, zone_type;
2051. unsigned long low_kmem_size, total_size;
  2052. struct zone *z;
  2053. int average_size;
  2054. /*
2055. * ZONE_DMA and ZONE_DMA32 can be a very small area in the system.
2056. * If they are really small and used heavily, the system can fall
2057. * into OOM very easily.
2058. * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
  2059. */
2060. /* Is there ZONE_NORMAL? (e.g. ppc has only a DMA zone.) */
  2061. low_kmem_size = 0;
  2062. total_size = 0;
  2063. for_each_online_node(nid) {
  2064. for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
  2065. z = &NODE_DATA(nid)->node_zones[zone_type];
  2066. if (populated_zone(z)) {
  2067. if (zone_type < ZONE_NORMAL)
  2068. low_kmem_size += z->present_pages;
  2069. total_size += z->present_pages;
  2070. }
  2071. }
  2072. }
2073. if (!low_kmem_size || /* there is no DMA area. */
  2074. low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
  2075. return ZONELIST_ORDER_NODE;
  2076. /*
  2077. * look into each node's config.
2078. * If there is a node whose DMA/DMA32 memory makes up a very large part
2079. * of its local memory, NODE_ORDER may be suitable.
  2080. */
  2081. average_size = total_size /
  2082. (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
  2083. for_each_online_node(nid) {
  2084. low_kmem_size = 0;
  2085. total_size = 0;
  2086. for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
  2087. z = &NODE_DATA(nid)->node_zones[zone_type];
  2088. if (populated_zone(z)) {
  2089. if (zone_type < ZONE_NORMAL)
  2090. low_kmem_size += z->present_pages;
  2091. total_size += z->present_pages;
  2092. }
  2093. }
  2094. if (low_kmem_size &&
  2095. total_size > average_size && /* ignore small node */
  2096. low_kmem_size > total_size * 70/100)
  2097. return ZONELIST_ORDER_NODE;
  2098. }
  2099. return ZONELIST_ORDER_ZONE;
  2100. }
  2101. static void set_zonelist_order(void)
  2102. {
  2103. if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
  2104. current_zonelist_order = default_zonelist_order();
  2105. else
  2106. current_zonelist_order = user_zonelist_order;
  2107. }
  2108. static void build_zonelists(pg_data_t *pgdat)
  2109. {
  2110. int j, node, load;
  2111. enum zone_type i;
  2112. nodemask_t used_mask;
  2113. int local_node, prev_node;
  2114. struct zonelist *zonelist;
  2115. int order = current_zonelist_order;
  2116. /* initialize zonelists */
  2117. for (i = 0; i < MAX_ZONELISTS; i++) {
  2118. zonelist = pgdat->node_zonelists + i;
  2119. zonelist->_zonerefs[0].zone = NULL;
  2120. zonelist->_zonerefs[0].zone_idx = 0;
  2121. }
  2122. /* NUMA-aware ordering of nodes */
  2123. local_node = pgdat->node_id;
  2124. load = num_online_nodes();
  2125. prev_node = local_node;
  2126. nodes_clear(used_mask);
  2127. memset(node_load, 0, sizeof(node_load));
  2128. memset(node_order, 0, sizeof(node_order));
  2129. j = 0;
  2130. while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
  2131. int distance = node_distance(local_node, node);
  2132. /*
  2133. * If another node is sufficiently far away then it is better
  2134. * to reclaim pages in a zone before going off node.
  2135. */
  2136. if (distance > RECLAIM_DISTANCE)
  2137. zone_reclaim_mode = 1;
  2138. /*
  2139. * We don't want to pressure a particular node.
2140. * So we add a penalty to the first node in the same
2141. * distance group to make it round-robin.
  2142. */
  2143. if (distance != node_distance(local_node, prev_node))
  2144. node_load[node] = load;
  2145. prev_node = node;
  2146. load--;
  2147. if (order == ZONELIST_ORDER_NODE)
  2148. build_zonelists_in_node_order(pgdat, node);
  2149. else
  2150. node_order[j++] = node; /* remember order */
  2151. }
  2152. if (order == ZONELIST_ORDER_ZONE) {
  2153. /* calculate node order -- i.e., DMA last! */
  2154. build_zonelists_in_zone_order(pgdat, j);
  2155. }
  2156. build_thisnode_zonelists(pgdat);
  2157. }
  2158. /* Construct the zonelist performance cache - see further mmzone.h */
  2159. static void build_zonelist_cache(pg_data_t *pgdat)
  2160. {
  2161. struct zonelist *zonelist;
  2162. struct zonelist_cache *zlc;
  2163. struct zoneref *z;
  2164. zonelist = &pgdat->node_zonelists[0];
  2165. zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
  2166. bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
  2167. for (z = zonelist->_zonerefs; z->zone; z++)
  2168. zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
  2169. }
  2170. #else /* CONFIG_NUMA */
  2171. static void set_zonelist_order(void)
  2172. {
  2173. current_zonelist_order = ZONELIST_ORDER_ZONE;
  2174. }
  2175. static void build_zonelists(pg_data_t *pgdat)
  2176. {
  2177. int node, local_node;
  2178. enum zone_type j;
  2179. struct zonelist *zonelist;
  2180. local_node = pgdat->node_id;
  2181. zonelist = &pgdat->node_zonelists[0];
  2182. j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
  2183. /*
  2184. * Now we build the zonelist so that it contains the zones
  2185. * of all the other nodes.
  2186. * We don't want to pressure a particular node, so when
  2187. * building the zones for node N, we make sure that the
  2188. * zones coming right after the local ones are those from
  2189. * node N+1 (modulo N)
  2190. */
  2191. for (node = local_node + 1; node < MAX_NUMNODES; node++) {
  2192. if (!node_online(node))
  2193. continue;
  2194. j = build_zonelists_node(NODE_DATA(node), zonelist, j,
  2195. MAX_NR_ZONES - 1);
  2196. }
  2197. for (node = 0; node < local_node; node++) {
  2198. if (!node_online(node))
  2199. continue;
  2200. j = build_zonelists_node(NODE_DATA(node), zonelist, j,
  2201. MAX_NR_ZONES - 1);
  2202. }
  2203. zonelist->_zonerefs[j].zone = NULL;
  2204. zonelist->_zonerefs[j].zone_idx = 0;
  2205. }
  2206. /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
  2207. static void build_zonelist_cache(pg_data_t *pgdat)
  2208. {
  2209. pgdat->node_zonelists[0].zlcache_ptr = NULL;
  2210. }
  2211. #endif /* CONFIG_NUMA */
2212. /* the int return value is just for stop_machine() */
  2213. static int __build_all_zonelists(void *dummy)
  2214. {
  2215. int nid;
  2216. for_each_online_node(nid) {
  2217. pg_data_t *pgdat = NODE_DATA(nid);
  2218. build_zonelists(pgdat);
  2219. build_zonelist_cache(pgdat);
  2220. }
  2221. return 0;
  2222. }
  2223. void build_all_zonelists(void)
  2224. {
  2225. set_zonelist_order();
  2226. if (system_state == SYSTEM_BOOTING) {
  2227. __build_all_zonelists(NULL);
  2228. mminit_verify_zonelist();
  2229. cpuset_init_current_mems_allowed();
  2230. } else {
  2231. /* we have to stop all cpus to guarantee there is no user
  2232. of zonelist */
  2233. stop_machine(__build_all_zonelists, NULL, NULL);
  2234. /* cpuset refresh routine should be here */
  2235. }
  2236. vm_total_pages = nr_free_pagecache_pages();
  2237. /*
  2238. * Disable grouping by mobility if the number of pages in the
  2239. * system is too low to allow the mechanism to work. It would be
  2240. * more accurate, but expensive to check per-zone. This check is
  2241. * made on memory-hotadd so a system can start with mobility
  2242. * disabled and enable it later
  2243. */
  2244. if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
  2245. page_group_by_mobility_disabled = 1;
  2246. else
  2247. page_group_by_mobility_disabled = 0;
  2248. printk("Built %i zonelists in %s order, mobility grouping %s. "
  2249. "Total pages: %ld\n",
  2250. num_online_nodes(),
  2251. zonelist_order_name[current_zonelist_order],
  2252. page_group_by_mobility_disabled ? "off" : "on",
  2253. vm_total_pages);
  2254. #ifdef CONFIG_NUMA
  2255. printk("Policy zone: %s\n", zone_names[policy_zone]);
  2256. #endif
  2257. }
  2258. /*
  2259. * Helper functions to size the waitqueue hash table.
  2260. * Essentially these want to choose hash table sizes sufficiently
  2261. * large so that collisions trying to wait on pages are rare.
  2262. * But in fact, the number of active page waitqueues on typical
  2263. * systems is ridiculously low, less than 200. So this is even
  2264. * conservative, even though it seems large.
  2265. *
  2266. * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
  2267. * waitqueues, i.e. the size of the waitq table given the number of pages.
  2268. */
  2269. #define PAGES_PER_WAITQUEUE 256
  2270. #ifndef CONFIG_MEMORY_HOTPLUG
  2271. static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
  2272. {
  2273. unsigned long size = 1;
  2274. pages /= PAGES_PER_WAITQUEUE;
  2275. while (size < pages)
  2276. size <<= 1;
  2277. /*
  2278. * Once we have dozens or even hundreds of threads sleeping
  2279. * on IO we've got bigger problems than wait queue collision.
  2280. * Limit the size of the wait table to a reasonable size.
  2281. */
  2282. size = min(size, 4096UL);
  2283. return max(size, 4UL);
  2284. }
  2285. #else
  2286. /*
  2287. * A zone's size might be changed by hot-add, so it is not possible to determine
  2288. * a suitable size for its wait_table. So we use the maximum size now.
  2289. *
  2290. * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
  2291. *
  2292. * i386 (preemption config) : 4096 x 16 = 64Kbyte.
  2293. * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
  2294. * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
  2295. *
2296. * The maximum number of entries is prepared when a zone's memory is
2297. * (512K + 256) pages or more, computed the traditional way (see above). It equals:
  2298. *
  2299. * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
  2300. * ia64(16K page size) : = ( 8G + 4M)byte.
  2301. * powerpc (64K page size) : = (32G +16M)byte.
  2302. */
  2303. static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
  2304. {
  2305. return 4096UL;
  2306. }
  2307. #endif
  2308. /*
  2309. * This is an integer logarithm so that shifts can be used later
  2310. * to extract the more random high bits from the multiplicative
  2311. * hash function before the remainder is taken.
  2312. */
  2313. static inline unsigned long wait_table_bits(unsigned long size)
  2314. {
  2315. return ffz(~size);
  2316. }
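/*
 * Worked example (illustrative, !CONFIG_MEMORY_HOTPLUG case): a zone of
 * ~1M pages (4GB with 4KB pages) gives wait_table_hash_nr_entries() =
 * 1M / 256 = 4096 entries, which is also the cap, and
 * wait_table_bits(4096) = 12 since 4096 == 1 << 12.
 */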
  2317. #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
  2318. /*
  2319. * Mark a number of pageblocks as MIGRATE_RESERVE. The number
  2320. * of blocks reserved is based on zone->pages_min. The memory within the
  2321. * reserve will tend to store contiguous free pages. Setting min_free_kbytes
  2322. * higher will lead to a bigger reserve which will get freed as contiguous
  2323. * blocks as reclaim kicks in
  2324. */
  2325. static void setup_zone_migrate_reserve(struct zone *zone)
  2326. {
  2327. unsigned long start_pfn, pfn, end_pfn;
  2328. struct page *page;
  2329. unsigned long reserve, block_migratetype;
  2330. /* Get the start pfn, end pfn and the number of blocks to reserve */
  2331. start_pfn = zone->zone_start_pfn;
  2332. end_pfn = start_pfn + zone->spanned_pages;
  2333. reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
  2334. pageblock_order;
  2335. for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  2336. if (!pfn_valid(pfn))
  2337. continue;
  2338. page = pfn_to_page(pfn);
  2339. /* Watch out for overlapping nodes */
  2340. if (page_to_nid(page) != zone_to_nid(zone))
  2341. continue;
  2342. /* Blocks with reserved pages will never free, skip them. */
  2343. if (PageReserved(page))
  2344. continue;
  2345. block_migratetype = get_pageblock_migratetype(page);
  2346. /* If this block is reserved, account for it */
  2347. if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
  2348. reserve--;
  2349. continue;
  2350. }
  2351. /* Suitable for reserving if this block is movable */
  2352. if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
  2353. set_pageblock_migratetype(page, MIGRATE_RESERVE);
  2354. move_freepages_block(zone, page, MIGRATE_RESERVE);
  2355. reserve--;
  2356. continue;
  2357. }
  2358. /*
2359. * If the reserve is met and this is a previously reserved block,
  2360. * take it back
  2361. */
  2362. if (block_migratetype == MIGRATE_RESERVE) {
  2363. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  2364. move_freepages_block(zone, page, MIGRATE_MOVABLE);
  2365. }
  2366. }
  2367. }
  2368. /*
  2369. * Initially all pages are reserved - free ones are freed
  2370. * up by free_all_bootmem() once the early boot process is
  2371. * done. Non-atomic initialization, single-pass.
  2372. */
  2373. void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
  2374. unsigned long start_pfn, enum memmap_context context)
  2375. {
  2376. struct page *page;
  2377. unsigned long end_pfn = start_pfn + size;
  2378. unsigned long pfn;
  2379. struct zone *z;
  2380. if (highest_memmap_pfn < end_pfn - 1)
  2381. highest_memmap_pfn = end_pfn - 1;
  2382. z = &NODE_DATA(nid)->node_zones[zone];
  2383. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  2384. /*
  2385. * There can be holes in boot-time mem_map[]s
  2386. * handed to this function. They do not
  2387. * exist on hotplugged memory.
  2388. */
  2389. if (context == MEMMAP_EARLY) {
  2390. if (!early_pfn_valid(pfn))
  2391. continue;
  2392. if (!early_pfn_in_nid(pfn, nid))
  2393. continue;
  2394. }
  2395. page = pfn_to_page(pfn);
  2396. set_page_links(page, zone, nid, pfn);
  2397. mminit_verify_page_links(page, zone, nid, pfn);
  2398. init_page_count(page);
  2399. reset_page_mapcount(page);
  2400. SetPageReserved(page);
  2401. /*
  2402. * Mark the block movable so that blocks are reserved for
  2403. * movable at startup. This will force kernel allocations
  2404. * to reserve their blocks rather than leaking throughout
  2405. * the address space during boot when many long-lived
  2406. * kernel allocations are made. Later some blocks near
  2407. * the start are marked MIGRATE_RESERVE by
  2408. * setup_zone_migrate_reserve()
  2409. *
2410. * The bitmap is created for the zone's valid pfn range, but the memmap
2411. * can be created for invalid pages (for alignment).
2412. * Check here so we do not call set_pageblock_migratetype() against a
2413. * pfn outside the zone.
  2414. */
  2415. if ((z->zone_start_pfn <= pfn)
  2416. && (pfn < z->zone_start_pfn + z->spanned_pages)
  2417. && !(pfn & (pageblock_nr_pages - 1)))
  2418. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  2419. INIT_LIST_HEAD(&page->lru);
  2420. #ifdef WANT_PAGE_VIRTUAL
  2421. /* The shift won't overflow because ZONE_NORMAL is below 4G. */
  2422. if (!is_highmem_idx(zone))
  2423. set_page_address(page, __va(pfn << PAGE_SHIFT));
  2424. #endif
  2425. }
  2426. }
  2427. static void __meminit zone_init_free_lists(struct zone *zone)
  2428. {
  2429. int order, t;
  2430. for_each_migratetype_order(order, t) {
  2431. INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
  2432. zone->free_area[order].nr_free = 0;
  2433. }
  2434. }
  2435. #ifndef __HAVE_ARCH_MEMMAP_INIT
  2436. #define memmap_init(size, nid, zone, start_pfn) \
  2437. memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
  2438. #endif
  2439. static int zone_batchsize(struct zone *zone)
  2440. {
  2441. #ifdef CONFIG_MMU
  2442. int batch;
  2443. /*
  2444. * The per-cpu-pages pools are set to around 1000th of the
  2445. * size of the zone. But no more than 1/2 of a meg.
  2446. *
  2447. * OK, so we don't know how big the cache is. So guess.
  2448. */
  2449. batch = zone->present_pages / 1024;
  2450. if (batch * PAGE_SIZE > 512 * 1024)
  2451. batch = (512 * 1024) / PAGE_SIZE;
  2452. batch /= 4; /* We effectively *= 4 below */
  2453. if (batch < 1)
  2454. batch = 1;
  2455. /*
  2456. * Clamp the batch to a 2^n - 1 value. Having a power
  2457. * of 2 value was found to be more likely to have
  2458. * suboptimal cache aliasing properties in some cases.
  2459. *
  2460. * For example if 2 tasks are alternately allocating
  2461. * batches of pages, one task can end up with a lot
  2462. * of pages of one half of the possible page colors
  2463. * and the other with pages of the other colors.
  2464. */
  2465. batch = rounddown_pow_of_two(batch + batch/2) - 1;
  2466. return batch;
  2467. #else
  2468. /* The deferral and batching of frees should be suppressed under NOMMU
  2469. * conditions.
  2470. *
  2471. * The problem is that NOMMU needs to be able to allocate large chunks
  2472. * of contiguous memory as there's no hardware page translation to
  2473. * assemble apparent contiguous memory from discontiguous pages.
  2474. *
  2475. * Queueing large contiguous runs of pages for batching, however,
  2476. * causes the pages to actually be freed in smaller chunks. As there
  2477. * can be a significant delay between the individual batches being
  2478. * recycled, this leads to the once large chunks of space being
  2479. * fragmented and becoming unavailable for high-order allocations.
  2480. */
  2481. return 0;
  2482. #endif
  2483. }
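/*
 * Worked example (illustrative, CONFIG_MMU case): a zone with 262144
 * present pages (1GB at 4KB/page) starts with batch = 262144 / 1024 = 256,
 * is capped to 512KB worth of pages (128), quartered to 32, and finally
 * clamped to rounddown_pow_of_two(32 + 16) - 1 = 31.
 */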
  2484. static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
  2485. {
  2486. struct per_cpu_pages *pcp;
  2487. memset(p, 0, sizeof(*p));
  2488. pcp = &p->pcp;
  2489. pcp->count = 0;
  2490. pcp->high = 6 * batch;
  2491. pcp->batch = max(1UL, 1 * batch);
  2492. INIT_LIST_HEAD(&pcp->list);
  2493. }
  2494. /*
  2495. * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
  2496. * to the value high for the pageset p.
  2497. */
  2498. static void setup_pagelist_highmark(struct per_cpu_pageset *p,
  2499. unsigned long high)
  2500. {
  2501. struct per_cpu_pages *pcp;
  2502. pcp = &p->pcp;
  2503. pcp->high = high;
  2504. pcp->batch = max(1UL, high/4);
  2505. if ((high/4) > (PAGE_SHIFT * 8))
  2506. pcp->batch = PAGE_SHIFT * 8;
  2507. }
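/*
 * Worked example (illustrative): setup_pagelist_highmark(p, 1000) sets
 * pcp->high = 1000 and pcp->batch = 1000 / 4 = 250, which exceeds
 * PAGE_SHIFT * 8 (96 with 4KB pages), so the batch is clamped to 96.
 */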
  2508. #ifdef CONFIG_NUMA
  2509. /*
  2510. * Boot pageset table. One per cpu which is going to be used for all
  2511. * zones and all nodes. The parameters will be set in such a way
  2512. * that an item put on a list will immediately be handed over to
  2513. * the buddy list. This is safe since pageset manipulation is done
  2514. * with interrupts disabled.
  2515. *
  2516. * Some NUMA counter updates may also be caught by the boot pagesets.
  2517. *
  2518. * The boot_pagesets must be kept even after bootup is complete for
  2519. * unused processors and/or zones. They do play a role for bootstrapping
  2520. * hotplugged processors.
  2521. *
  2522. * zoneinfo_show() and maybe other functions do
  2523. * not check if the processor is online before following the pageset pointer.
  2524. * Other parts of the kernel may not check if the zone is available.
  2525. */
  2526. static struct per_cpu_pageset boot_pageset[NR_CPUS];
  2527. /*
  2528. * Dynamically allocate memory for the
  2529. * per cpu pageset array in struct zone.
  2530. */
  2531. static int __cpuinit process_zones(int cpu)
  2532. {
  2533. struct zone *zone, *dzone;
  2534. int node = cpu_to_node(cpu);
  2535. node_set_state(node, N_CPU); /* this node has a cpu */
  2536. for_each_populated_zone(zone) {
  2537. zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
  2538. GFP_KERNEL, node);
  2539. if (!zone_pcp(zone, cpu))
  2540. goto bad;
  2541. setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
  2542. if (percpu_pagelist_fraction)
  2543. setup_pagelist_highmark(zone_pcp(zone, cpu),
  2544. (zone->present_pages / percpu_pagelist_fraction));
  2545. }
  2546. return 0;
  2547. bad:
  2548. for_each_zone(dzone) {
  2549. if (!populated_zone(dzone))
  2550. continue;
  2551. if (dzone == zone)
  2552. break;
  2553. kfree(zone_pcp(dzone, cpu));
  2554. zone_pcp(dzone, cpu) = NULL;
  2555. }
  2556. return -ENOMEM;
  2557. }
  2558. static inline void free_zone_pagesets(int cpu)
  2559. {
  2560. struct zone *zone;
  2561. for_each_zone(zone) {
  2562. struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
  2563. /* Free per_cpu_pageset if it is slab allocated */
  2564. if (pset != &boot_pageset[cpu])
  2565. kfree(pset);
  2566. zone_pcp(zone, cpu) = NULL;
  2567. }
  2568. }
  2569. static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
  2570. unsigned long action,
  2571. void *hcpu)
  2572. {
  2573. int cpu = (long)hcpu;
  2574. int ret = NOTIFY_OK;
  2575. switch (action) {
  2576. case CPU_UP_PREPARE:
  2577. case CPU_UP_PREPARE_FROZEN:
  2578. if (process_zones(cpu))
  2579. ret = NOTIFY_BAD;
  2580. break;
  2581. case CPU_UP_CANCELED:
  2582. case CPU_UP_CANCELED_FROZEN:
  2583. case CPU_DEAD:
  2584. case CPU_DEAD_FROZEN:
  2585. free_zone_pagesets(cpu);
  2586. break;
  2587. default:
  2588. break;
  2589. }
  2590. return ret;
  2591. }
  2592. static struct notifier_block __cpuinitdata pageset_notifier =
  2593. { &pageset_cpuup_callback, NULL, 0 };
  2594. void __init setup_per_cpu_pageset(void)
  2595. {
  2596. int err;
  2597. /* Initialize per_cpu_pageset for cpu 0.
  2598. * A cpuup callback will do this for every cpu
  2599. * as it comes online
  2600. */
  2601. err = process_zones(smp_processor_id());
  2602. BUG_ON(err);
  2603. register_cpu_notifier(&pageset_notifier);
  2604. }
  2605. #endif
  2606. static noinline __init_refok
  2607. int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
  2608. {
  2609. int i;
  2610. struct pglist_data *pgdat = zone->zone_pgdat;
  2611. size_t alloc_size;
  2612. /*
  2613. * The per-page waitqueue mechanism uses hashed waitqueues
  2614. * per zone.
  2615. */
  2616. zone->wait_table_hash_nr_entries =
  2617. wait_table_hash_nr_entries(zone_size_pages);
  2618. zone->wait_table_bits =
  2619. wait_table_bits(zone->wait_table_hash_nr_entries);
  2620. alloc_size = zone->wait_table_hash_nr_entries
  2621. * sizeof(wait_queue_head_t);
  2622. if (!slab_is_available()) {
  2623. zone->wait_table = (wait_queue_head_t *)
  2624. alloc_bootmem_node(pgdat, alloc_size);
  2625. } else {
  2626. /*
  2627. * This case means that a zone whose size was 0 gets new memory
  2628. * via memory hot-add.
  2629. * But it may be the case that a new node was hot-added. In
  2630. * this case vmalloc() will not be able to use this new node's
  2631. * memory - this wait_table must be initialized to use this new
  2632. * node itself as well.
  2633. * To use this new node's memory, further consideration will be
  2634. * necessary.
  2635. */
  2636. zone->wait_table = vmalloc(alloc_size);
  2637. }
  2638. if (!zone->wait_table)
  2639. return -ENOMEM;
2640. for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
  2641. init_waitqueue_head(zone->wait_table + i);
  2642. return 0;
  2643. }
  2644. static __meminit void zone_pcp_init(struct zone *zone)
  2645. {
  2646. int cpu;
  2647. unsigned long batch = zone_batchsize(zone);
  2648. for (cpu = 0; cpu < NR_CPUS; cpu++) {
  2649. #ifdef CONFIG_NUMA
  2650. /* Early boot. Slab allocator not functional yet */
  2651. zone_pcp(zone, cpu) = &boot_pageset[cpu];
2652. setup_pageset(&boot_pageset[cpu], 0);
2653. #else
2654. setup_pageset(zone_pcp(zone, cpu), batch);
  2655. #endif
  2656. }
  2657. if (zone->present_pages)
  2658. printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
  2659. zone->name, zone->present_pages, batch);
  2660. }
  2661. __meminit int init_currently_empty_zone(struct zone *zone,
  2662. unsigned long zone_start_pfn,
  2663. unsigned long size,
  2664. enum memmap_context context)
  2665. {
  2666. struct pglist_data *pgdat = zone->zone_pgdat;
  2667. int ret;
  2668. ret = zone_wait_table_init(zone, size);
  2669. if (ret)
  2670. return ret;
  2671. pgdat->nr_zones = zone_idx(zone) + 1;
  2672. zone->zone_start_pfn = zone_start_pfn;
  2673. mminit_dprintk(MMINIT_TRACE, "memmap_init",
  2674. "Initialising map node %d zone %lu pfns %lu -> %lu\n",
  2675. pgdat->node_id,
  2676. (unsigned long)zone_idx(zone),
  2677. zone_start_pfn, (zone_start_pfn + size));
  2678. zone_init_free_lists(zone);
  2679. return 0;
  2680. }
  2681. #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  2682. /*
  2683. * Basic iterator support. Return the first range of PFNs for a node
  2684. * Note: nid == MAX_NUMNODES returns first region regardless of node
  2685. */
  2686. static int __meminit first_active_region_index_in_nid(int nid)
  2687. {
  2688. int i;
  2689. for (i = 0; i < nr_nodemap_entries; i++)
  2690. if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
  2691. return i;
  2692. return -1;
  2693. }
  2694. /*
  2695. * Basic iterator support. Return the next active range of PFNs for a node
  2696. * Note: nid == MAX_NUMNODES returns next region regardless of node
  2697. */
  2698. static int __meminit next_active_region_index_in_nid(int index, int nid)
  2699. {
  2700. for (index = index + 1; index < nr_nodemap_entries; index++)
  2701. if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
  2702. return index;
  2703. return -1;
  2704. }
  2705. #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
  2706. /*
  2707. * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
  2708. * Architectures may implement their own version but if add_active_range()
  2709. * was used and there are no special requirements, this is a convenient
  2710. * alternative
  2711. */
  2712. int __meminit __early_pfn_to_nid(unsigned long pfn)
  2713. {
  2714. int i;
  2715. for (i = 0; i < nr_nodemap_entries; i++) {
  2716. unsigned long start_pfn = early_node_map[i].start_pfn;
  2717. unsigned long end_pfn = early_node_map[i].end_pfn;
  2718. if (start_pfn <= pfn && pfn < end_pfn)
  2719. return early_node_map[i].nid;
  2720. }
  2721. /* This is a memory hole */
  2722. return -1;
  2723. }
  2724. #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
  2725. int __meminit early_pfn_to_nid(unsigned long pfn)
  2726. {
  2727. int nid;
  2728. nid = __early_pfn_to_nid(pfn);
  2729. if (nid >= 0)
  2730. return nid;
  2731. /* just returns 0 */
  2732. return 0;
  2733. }
  2734. #ifdef CONFIG_NODES_SPAN_OTHER_NODES
  2735. bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
  2736. {
  2737. int nid;
  2738. nid = __early_pfn_to_nid(pfn);
  2739. if (nid >= 0 && nid != node)
  2740. return false;
  2741. return true;
  2742. }
  2743. #endif
  2744. /* Basic iterator support to walk early_node_map[] */
  2745. #define for_each_active_range_index_in_nid(i, nid) \
  2746. for (i = first_active_region_index_in_nid(nid); i != -1; \
  2747. i = next_active_region_index_in_nid(i, nid))
  2748. /**
  2749. * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
  2750. * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
  2751. * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
  2752. *
  2753. * If an architecture guarantees that all ranges registered with
2754. * add_active_ranges() contain no holes and may be freed, this
2755. * function may be used instead of calling free_bootmem() manually.
  2756. */
  2757. void __init free_bootmem_with_active_regions(int nid,
  2758. unsigned long max_low_pfn)
  2759. {
  2760. int i;
  2761. for_each_active_range_index_in_nid(i, nid) {
  2762. unsigned long size_pages = 0;
  2763. unsigned long end_pfn = early_node_map[i].end_pfn;
  2764. if (early_node_map[i].start_pfn >= max_low_pfn)
  2765. continue;
  2766. if (end_pfn > max_low_pfn)
  2767. end_pfn = max_low_pfn;
  2768. size_pages = end_pfn - early_node_map[i].start_pfn;
  2769. free_bootmem_node(NODE_DATA(early_node_map[i].nid),
  2770. PFN_PHYS(early_node_map[i].start_pfn),
  2771. size_pages << PAGE_SHIFT);
  2772. }
  2773. }
  2774. void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
  2775. {
  2776. int i;
  2777. int ret;
  2778. for_each_active_range_index_in_nid(i, nid) {
  2779. ret = work_fn(early_node_map[i].start_pfn,
  2780. early_node_map[i].end_pfn, data);
  2781. if (ret)
  2782. break;
  2783. }
  2784. }
  2785. /**
  2786. * sparse_memory_present_with_active_regions - Call memory_present for each active range
  2787. * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
  2788. *
  2789. * If an architecture guarantees that all ranges registered with
  2790. * add_active_ranges() contain no holes and may be freed, this
  2791. * function may be used instead of calling memory_present() manually.
  2792. */
  2793. void __init sparse_memory_present_with_active_regions(int nid)
  2794. {
  2795. int i;
  2796. for_each_active_range_index_in_nid(i, nid)
  2797. memory_present(early_node_map[i].nid,
  2798. early_node_map[i].start_pfn,
  2799. early_node_map[i].end_pfn);
  2800. }
  2801. /**
  2802. * get_pfn_range_for_nid - Return the start and end page frames for a node
  2803. * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
  2804. * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
  2805. * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
  2806. *
  2807. * It returns the start and end page frame of a node based on information
  2808. * provided by an arch calling add_active_range(). If called for a node
  2809. * with no available memory, a warning is printed and the start and end
  2810. * PFNs will be 0.
  2811. */
  2812. void __meminit get_pfn_range_for_nid(unsigned int nid,
  2813. unsigned long *start_pfn, unsigned long *end_pfn)
  2814. {
  2815. int i;
  2816. *start_pfn = -1UL;
  2817. *end_pfn = 0;
  2818. for_each_active_range_index_in_nid(i, nid) {
  2819. *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
  2820. *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
  2821. }
  2822. if (*start_pfn == -1UL)
  2823. *start_pfn = 0;
  2824. }
  2825. /*
  2826. * This finds a zone that can be used for ZONE_MOVABLE pages. The
2827. * assumption is made that zones within a node are ordered in monotonically
2828. * increasing memory addresses, so that the "highest" populated zone is used.
  2829. */
  2830. static void __init find_usable_zone_for_movable(void)
  2831. {
  2832. int zone_index;
  2833. for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
  2834. if (zone_index == ZONE_MOVABLE)
  2835. continue;
  2836. if (arch_zone_highest_possible_pfn[zone_index] >
  2837. arch_zone_lowest_possible_pfn[zone_index])
  2838. break;
  2839. }
  2840. VM_BUG_ON(zone_index == -1);
  2841. movable_zone = zone_index;
  2842. }
  2843. /*
  2844. * The zone ranges provided by the architecture do not include ZONE_MOVABLE
2845. * because it is sized independently of the architecture. Unlike the other zones,
  2846. * the starting point for ZONE_MOVABLE is not fixed. It may be different
  2847. * in each node depending on the size of each node and how evenly kernelcore
  2848. * is distributed. This helper function adjusts the zone ranges
  2849. * provided by the architecture for a given node by using the end of the
  2850. * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
2851. * zones within a node are in order of monotonically increasing memory addresses.
  2852. */
  2853. static void __meminit adjust_zone_range_for_zone_movable(int nid,
  2854. unsigned long zone_type,
  2855. unsigned long node_start_pfn,
  2856. unsigned long node_end_pfn,
  2857. unsigned long *zone_start_pfn,
  2858. unsigned long *zone_end_pfn)
  2859. {
  2860. /* Only adjust if ZONE_MOVABLE is on this node */
  2861. if (zone_movable_pfn[nid]) {
  2862. /* Size ZONE_MOVABLE */
  2863. if (zone_type == ZONE_MOVABLE) {
  2864. *zone_start_pfn = zone_movable_pfn[nid];
  2865. *zone_end_pfn = min(node_end_pfn,
  2866. arch_zone_highest_possible_pfn[movable_zone]);
  2867. /* Adjust for ZONE_MOVABLE starting within this range */
  2868. } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
  2869. *zone_end_pfn > zone_movable_pfn[nid]) {
  2870. *zone_end_pfn = zone_movable_pfn[nid];
  2871. /* Check if this whole range is within ZONE_MOVABLE */
  2872. } else if (*zone_start_pfn >= zone_movable_pfn[nid])
  2873. *zone_start_pfn = *zone_end_pfn;
  2874. }
  2875. }
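/*
 * Illustrative example (hypothetical PFNs): if zone_movable_pfn[nid] is
 * 0x40000 and a zone on this node spans 0x20000-0x60000, its end is clipped
 * to 0x40000; a zone lying entirely at or above 0x40000 collapses to an
 * empty range, since those pages now belong to ZONE_MOVABLE.
 */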
  2876. /*
  2877. * Return the number of pages a zone spans in a node, including holes
  2878. * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  2879. */
  2880. static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  2881. unsigned long zone_type,
  2882. unsigned long *ignored)
  2883. {
  2884. unsigned long node_start_pfn, node_end_pfn;
  2885. unsigned long zone_start_pfn, zone_end_pfn;
  2886. /* Get the start and end of the node and zone */
  2887. get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
  2888. zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
  2889. zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
  2890. adjust_zone_range_for_zone_movable(nid, zone_type,
  2891. node_start_pfn, node_end_pfn,
  2892. &zone_start_pfn, &zone_end_pfn);
  2893. /* Check that this node has pages within the zone's required range */
  2894. if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
  2895. return 0;
  2896. /* Move the zone boundaries inside the node if necessary */
  2897. zone_end_pfn = min(zone_end_pfn, node_end_pfn);
  2898. zone_start_pfn = max(zone_start_pfn, node_start_pfn);
  2899. /* Return the spanned pages */
  2900. return zone_end_pfn - zone_start_pfn;
  2901. }
  2902. /*
  2903. * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  2904. * then all holes in the requested range will be accounted for.
  2905. */
  2906. static unsigned long __meminit __absent_pages_in_range(int nid,
  2907. unsigned long range_start_pfn,
  2908. unsigned long range_end_pfn)
  2909. {
  2910. int i = 0;
  2911. unsigned long prev_end_pfn = 0, hole_pages = 0;
  2912. unsigned long start_pfn;
  2913. /* Find the end_pfn of the first active range of pfns in the node */
  2914. i = first_active_region_index_in_nid(nid);
  2915. if (i == -1)
  2916. return 0;
  2917. prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
  2918. /* Account for ranges before physical memory on this node */
  2919. if (early_node_map[i].start_pfn > range_start_pfn)
  2920. hole_pages = prev_end_pfn - range_start_pfn;
  2921. /* Find all holes for the zone within the node */
  2922. for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
  2923. /* No need to continue if prev_end_pfn is outside the zone */
  2924. if (prev_end_pfn >= range_end_pfn)
  2925. break;
  2926. /* Make sure the end of the zone is not within the hole */
  2927. start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
  2928. prev_end_pfn = max(prev_end_pfn, range_start_pfn);
2929. /* Update the hole size count and move on */
  2930. if (start_pfn > range_start_pfn) {
  2931. BUG_ON(prev_end_pfn > start_pfn);
  2932. hole_pages += start_pfn - prev_end_pfn;
  2933. }
  2934. prev_end_pfn = early_node_map[i].end_pfn;
  2935. }
  2936. /* Account for ranges past physical memory on this node */
  2937. if (range_end_pfn > prev_end_pfn)
  2938. hole_pages += range_end_pfn -
  2939. max(range_start_pfn, prev_end_pfn);
  2940. return hole_pages;
  2941. }
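/*
 * Worked example (hypothetical ranges): with active ranges [100, 200) and
 * [300, 400) on a node, __absent_pages_in_range(nid, 0, 500) accounts 100
 * pages before the first range, 100 pages between the two ranges and 100
 * pages after the last one, returning 300.
 */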
  2942. /**
  2943. * absent_pages_in_range - Return number of page frames in holes within a range
  2944. * @start_pfn: The start PFN to start searching for holes
  2945. * @end_pfn: The end PFN to stop searching for holes
  2946. *
2947. * It returns the number of page frames in memory holes within a range.
  2948. */
  2949. unsigned long __init absent_pages_in_range(unsigned long start_pfn,
  2950. unsigned long end_pfn)
  2951. {
  2952. return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
  2953. }
  2954. /* Return the number of page frames in holes in a zone on a node */
  2955. static unsigned long __meminit zone_absent_pages_in_node(int nid,
  2956. unsigned long zone_type,
  2957. unsigned long *ignored)
  2958. {
  2959. unsigned long node_start_pfn, node_end_pfn;
  2960. unsigned long zone_start_pfn, zone_end_pfn;
  2961. get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
  2962. zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
  2963. node_start_pfn);
  2964. zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
  2965. node_end_pfn);
  2966. adjust_zone_range_for_zone_movable(nid, zone_type,
  2967. node_start_pfn, node_end_pfn,
  2968. &zone_start_pfn, &zone_end_pfn);
  2969. return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
  2970. }
  2971. #else
  2972. static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
  2973. unsigned long zone_type,
  2974. unsigned long *zones_size)
  2975. {
  2976. return zones_size[zone_type];
  2977. }
  2978. static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
  2979. unsigned long zone_type,
  2980. unsigned long *zholes_size)
  2981. {
  2982. if (!zholes_size)
  2983. return 0;
  2984. return zholes_size[zone_type];
  2985. }
  2986. #endif
  2987. static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
  2988. unsigned long *zones_size, unsigned long *zholes_size)
  2989. {
  2990. unsigned long realtotalpages, totalpages = 0;
  2991. enum zone_type i;
  2992. for (i = 0; i < MAX_NR_ZONES; i++)
  2993. totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
  2994. zones_size);
  2995. pgdat->node_spanned_pages = totalpages;
  2996. realtotalpages = totalpages;
  2997. for (i = 0; i < MAX_NR_ZONES; i++)
  2998. realtotalpages -=
  2999. zone_absent_pages_in_node(pgdat->node_id, i,
  3000. zholes_size);
  3001. pgdat->node_present_pages = realtotalpages;
  3002. printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
  3003. realtotalpages);
  3004. }
  3005. #ifndef CONFIG_SPARSEMEM
  3006. /*
  3007. * Calculate the size of the zone->blockflags rounded to an unsigned long
  3008. * Start by making sure zonesize is a multiple of pageblock_order by rounding
3009. * up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock, round
3010. * what is now in bits up to the nearest long, and return the result in
3011. * bytes.
  3012. */
  3013. static unsigned long __init usemap_size(unsigned long zonesize)
  3014. {
  3015. unsigned long usemapsize;
  3016. usemapsize = roundup(zonesize, pageblock_nr_pages);
  3017. usemapsize = usemapsize >> pageblock_order;
  3018. usemapsize *= NR_PAGEBLOCK_BITS;
  3019. usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
  3020. return usemapsize / 8;
  3021. }
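/*
 * Worked example (illustrative; assumes pageblock_order == 10 and
 * NR_PAGEBLOCK_BITS == 4): a zone of 262144 pages has 256 pageblocks,
 * needing 1024 bits of flags, which is already a multiple of the word
 * size and so becomes 128 bytes.
 */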
  3022. static void __init setup_usemap(struct pglist_data *pgdat,
  3023. struct zone *zone, unsigned long zonesize)
  3024. {
  3025. unsigned long usemapsize = usemap_size(zonesize);
  3026. zone->pageblock_flags = NULL;
  3027. if (usemapsize)
  3028. zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
  3029. }
  3030. #else
3031. static inline void setup_usemap(struct pglist_data *pgdat,
  3032. struct zone *zone, unsigned long zonesize) {}
  3033. #endif /* CONFIG_SPARSEMEM */
  3034. #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
  3035. /* Return a sensible default order for the pageblock size. */
  3036. static inline int pageblock_default_order(void)
  3037. {
  3038. if (HPAGE_SHIFT > PAGE_SHIFT)
  3039. return HUGETLB_PAGE_ORDER;
  3040. return MAX_ORDER-1;
  3041. }
  3042. /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
  3043. static inline void __init set_pageblock_order(unsigned int order)
  3044. {
  3045. /* Check that pageblock_nr_pages has not already been setup */
  3046. if (pageblock_order)
  3047. return;
  3048. /*
  3049. * Assume the largest contiguous order of interest is a huge page.
  3050. * This value may be variable depending on boot parameters on IA64
  3051. */
  3052. pageblock_order = order;
  3053. }
  3054. #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
  3055. /*
  3056. * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
  3057. * and pageblock_default_order() are unused as pageblock_order is set
  3058. * at compile-time. See include/linux/pageblock-flags.h for the values of
  3059. * pageblock_order based on the kernel config
  3060. */
  3061. static inline int pageblock_default_order(unsigned int order)
  3062. {
  3063. return MAX_ORDER-1;
  3064. }
  3065. #define set_pageblock_order(x) do {} while (0)
  3066. #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
  3067. /*
  3068. * Set up the zone data structures:
  3069. * - mark all pages reserved
  3070. * - mark all memory queues empty
  3071. * - clear the memory bitmaps
  3072. */
  3073. static void __paginginit free_area_init_core(struct pglist_data *pgdat,
  3074. unsigned long *zones_size, unsigned long *zholes_size)
  3075. {
  3076. enum zone_type j;
  3077. int nid = pgdat->node_id;
  3078. unsigned long zone_start_pfn = pgdat->node_start_pfn;
  3079. int ret;
  3080. pgdat_resize_init(pgdat);
  3081. pgdat->nr_zones = 0;
  3082. init_waitqueue_head(&pgdat->kswapd_wait);
  3083. pgdat->kswapd_max_order = 0;
  3084. pgdat_page_cgroup_init(pgdat);
  3085. for (j = 0; j < MAX_NR_ZONES; j++) {
  3086. struct zone *zone = pgdat->node_zones + j;
  3087. unsigned long size, realsize, memmap_pages;
  3088. enum lru_list l;
  3089. size = zone_spanned_pages_in_node(nid, j, zones_size);
  3090. realsize = size - zone_absent_pages_in_node(nid, j,
  3091. zholes_size);
  3092. /*
  3093. * Adjust realsize so that it accounts for how much memory
  3094. * is used by this zone for memmap. This affects the watermark
  3095. * and per-cpu initialisations
  3096. */
  3097. memmap_pages =
  3098. PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
  3099. if (realsize >= memmap_pages) {
  3100. realsize -= memmap_pages;
  3101. if (memmap_pages)
  3102. printk(KERN_DEBUG
  3103. " %s zone: %lu pages used for memmap\n",
  3104. zone_names[j], memmap_pages);
  3105. } else
  3106. printk(KERN_WARNING
  3107. " %s zone: %lu pages exceeds realsize %lu\n",
  3108. zone_names[j], memmap_pages, realsize);
  3109. /* Account for reserved pages */
  3110. if (j == 0 && realsize > dma_reserve) {
  3111. realsize -= dma_reserve;
  3112. printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
  3113. zone_names[0], dma_reserve);
  3114. }
  3115. if (!is_highmem_idx(j))
  3116. nr_kernel_pages += realsize;
  3117. nr_all_pages += realsize;
  3118. zone->spanned_pages = size;
  3119. zone->present_pages = realsize;
  3120. #ifdef CONFIG_NUMA
  3121. zone->node = nid;
  3122. zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
  3123. / 100;
  3124. zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
  3125. #endif
  3126. zone->name = zone_names[j];
  3127. spin_lock_init(&zone->lock);
  3128. spin_lock_init(&zone->lru_lock);
  3129. zone_seqlock_init(zone);
  3130. zone->zone_pgdat = pgdat;
  3131. zone->prev_priority = DEF_PRIORITY;
  3132. zone_pcp_init(zone);
  3133. for_each_lru(l) {
  3134. INIT_LIST_HEAD(&zone->lru[l].list);
  3135. zone->lru[l].nr_scan = 0;
  3136. }
  3137. zone->reclaim_stat.recent_rotated[0] = 0;
  3138. zone->reclaim_stat.recent_rotated[1] = 0;
  3139. zone->reclaim_stat.recent_scanned[0] = 0;
  3140. zone->reclaim_stat.recent_scanned[1] = 0;
  3141. zap_zone_vm_stats(zone);
  3142. zone->flags = 0;
  3143. if (!size)
  3144. continue;
  3145. set_pageblock_order(pageblock_default_order());
  3146. setup_usemap(pgdat, zone, size);
  3147. ret = init_currently_empty_zone(zone, zone_start_pfn,
  3148. size, MEMMAP_EARLY);
  3149. BUG_ON(ret);
  3150. memmap_init(size, nid, j, zone_start_pfn);
  3151. zone_start_pfn += size;
  3152. }
  3153. }
  3154. static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
  3155. {
  3156. /* Skip empty nodes */
  3157. if (!pgdat->node_spanned_pages)
  3158. return;
  3159. #ifdef CONFIG_FLAT_NODE_MEM_MAP
  3160. /* ia64 gets its own node_mem_map, before this, without bootmem */
  3161. if (!pgdat->node_mem_map) {
  3162. unsigned long size, start, end;
  3163. struct page *map;
  3164. /*
  3165. * The zone's endpoints aren't required to be MAX_ORDER
3166. * aligned, but the node_mem_map endpoints must be MAX_ORDER
3167. * aligned for the buddy allocator to function correctly.
  3168. */
  3169. start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
  3170. end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
  3171. end = ALIGN(end, MAX_ORDER_NR_PAGES);
  3172. size = (end - start) * sizeof(struct page);
  3173. map = alloc_remap(pgdat->node_id, size);
  3174. if (!map)
  3175. map = alloc_bootmem_node(pgdat, size);
  3176. pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
  3177. }
  3178. #ifndef CONFIG_NEED_MULTIPLE_NODES
  3179. /*
  3180. * With no DISCONTIG, the global mem_map is just set as node 0's
  3181. */
  3182. if (pgdat == NODE_DATA(0)) {
  3183. mem_map = NODE_DATA(0)->node_mem_map;
  3184. #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  3185. if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
  3186. mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
  3187. #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
  3188. }
  3189. #endif
  3190. #endif /* CONFIG_FLAT_NODE_MEM_MAP */
  3191. }
  3192. void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
  3193. unsigned long node_start_pfn, unsigned long *zholes_size)
  3194. {
  3195. pg_data_t *pgdat = NODE_DATA(nid);
  3196. pgdat->node_id = nid;
  3197. pgdat->node_start_pfn = node_start_pfn;
  3198. calculate_node_totalpages(pgdat, zones_size, zholes_size);
  3199. alloc_node_mem_map(pgdat);
  3200. #ifdef CONFIG_FLAT_NODE_MEM_MAP
  3201. printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
  3202. nid, (unsigned long)pgdat,
  3203. (unsigned long)pgdat->node_mem_map);
  3204. #endif
  3205. free_area_init_core(pgdat, zones_size, zholes_size);
  3206. }
  3207. #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  3208. #if MAX_NUMNODES > 1
  3209. /*
  3210. * Figure out the number of possible node ids.
  3211. */
  3212. static void __init setup_nr_node_ids(void)
  3213. {
  3214. unsigned int node;
  3215. unsigned int highest = 0;
  3216. for_each_node_mask(node, node_possible_map)
  3217. highest = node;
  3218. nr_node_ids = highest + 1;
  3219. }
  3220. #else
  3221. static inline void setup_nr_node_ids(void)
  3222. {
  3223. }
  3224. #endif
  3225. /**
  3226. * add_active_range - Register a range of PFNs backed by physical memory
  3227. * @nid: The node ID the range resides on
  3228. * @start_pfn: The start PFN of the available physical memory
  3229. * @end_pfn: The end PFN of the available physical memory
  3230. *
  3231. * These ranges are stored in an early_node_map[] and later used by
  3232. * free_area_init_nodes() to calculate zone sizes and holes. If the
  3233. * range spans a memory hole, it is up to the architecture to ensure
  3234. * the memory is not freed by the bootmem allocator. If possible
  3235. * the range being registered will be merged with existing ranges.
  3236. */
  3237. void __init add_active_range(unsigned int nid, unsigned long start_pfn,
  3238. unsigned long end_pfn)
  3239. {
  3240. int i;
  3241. mminit_dprintk(MMINIT_TRACE, "memory_register",
  3242. "Entering add_active_range(%d, %#lx, %#lx) "
  3243. "%d entries of %d used\n",
  3244. nid, start_pfn, end_pfn,
  3245. nr_nodemap_entries, MAX_ACTIVE_REGIONS);
  3246. mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
  3247. /* Merge with existing active regions if possible */
  3248. for (i = 0; i < nr_nodemap_entries; i++) {
  3249. if (early_node_map[i].nid != nid)
  3250. continue;
  3251. /* Skip if an existing region covers this new one */
  3252. if (start_pfn >= early_node_map[i].start_pfn &&
  3253. end_pfn <= early_node_map[i].end_pfn)
  3254. return;
  3255. /* Merge forward if suitable */
  3256. if (start_pfn <= early_node_map[i].end_pfn &&
  3257. end_pfn > early_node_map[i].end_pfn) {
  3258. early_node_map[i].end_pfn = end_pfn;
  3259. return;
  3260. }
  3261. /* Merge backward if suitable */
  3262. if (start_pfn < early_node_map[i].end_pfn &&
  3263. end_pfn >= early_node_map[i].start_pfn) {
  3264. early_node_map[i].start_pfn = start_pfn;
  3265. return;
  3266. }
  3267. }
  3268. /* Check that early_node_map is large enough */
  3269. if (i >= MAX_ACTIVE_REGIONS) {
  3270. printk(KERN_CRIT "More than %d memory regions, truncating\n",
  3271. MAX_ACTIVE_REGIONS);
  3272. return;
  3273. }
  3274. early_node_map[i].nid = nid;
  3275. early_node_map[i].start_pfn = start_pfn;
  3276. early_node_map[i].end_pfn = end_pfn;
  3277. nr_nodemap_entries = i + 1;
  3278. }
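/*
 * Illustrative example (hypothetical PFNs): registering (nid 0, 0x100, 0x200)
 * and then (nid 0, 0x180, 0x300) results in a single merged entry spanning
 * 0x100-0x300, since the second range overlaps the end of the first and is
 * merged forward.
 */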
  3279. /**
  3280. * remove_active_range - Shrink an existing registered range of PFNs
  3281. * @nid: The node id the range is on that should be shrunk
3282. * @start_pfn: The start PFN of the range to remove
3283. * @end_pfn: The end PFN of the range to remove
  3284. *
3285. * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
3286. * The map is kept near the end of the physical page range that has already been
  3287. * registered. This function allows an arch to shrink an existing registered
  3288. * range.
  3289. */
  3290. void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
  3291. unsigned long end_pfn)
  3292. {
  3293. int i, j;
  3294. int removed = 0;
  3295. printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
  3296. nid, start_pfn, end_pfn);
  3297. /* Find the old active region end and shrink */
  3298. for_each_active_range_index_in_nid(i, nid) {
  3299. if (early_node_map[i].start_pfn >= start_pfn &&
  3300. early_node_map[i].end_pfn <= end_pfn) {
  3301. /* clear it */
  3302. early_node_map[i].start_pfn = 0;
  3303. early_node_map[i].end_pfn = 0;
  3304. removed = 1;
  3305. continue;
  3306. }
  3307. if (early_node_map[i].start_pfn < start_pfn &&
  3308. early_node_map[i].end_pfn > start_pfn) {
  3309. unsigned long temp_end_pfn = early_node_map[i].end_pfn;
  3310. early_node_map[i].end_pfn = start_pfn;
  3311. if (temp_end_pfn > end_pfn)
  3312. add_active_range(nid, end_pfn, temp_end_pfn);
  3313. continue;
  3314. }
  3315. if (early_node_map[i].start_pfn >= start_pfn &&
  3316. early_node_map[i].end_pfn > end_pfn &&
  3317. early_node_map[i].start_pfn < end_pfn) {
  3318. early_node_map[i].start_pfn = end_pfn;
  3319. continue;
  3320. }
  3321. }
  3322. if (!removed)
  3323. return;
  3324. /* remove the blank ones */
  3325. for (i = nr_nodemap_entries - 1; i > 0; i--) {
  3326. if (early_node_map[i].nid != nid)
  3327. continue;
  3328. if (early_node_map[i].end_pfn)
  3329. continue;
  3330. /* we found it, get rid of it */
  3331. for (j = i; j < nr_nodemap_entries - 1; j++)
  3332. memcpy(&early_node_map[j], &early_node_map[j+1],
  3333. sizeof(early_node_map[j]));
  3334. j = nr_nodemap_entries - 1;
  3335. memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
  3336. nr_nodemap_entries--;
  3337. }
  3338. }
  3339. /**
  3340. * remove_all_active_ranges - Remove all currently registered regions
  3341. *
  3342. * During discovery, it may be found that a table like SRAT is invalid
  3343. * and an alternative discovery method must be used. This function removes
  3344. * all currently registered regions.
  3345. */
  3346. void __init remove_all_active_ranges(void)
  3347. {
  3348. memset(early_node_map, 0, sizeof(early_node_map));
  3349. nr_nodemap_entries = 0;
  3350. }
  3351. /* Compare two active node_active_regions */
  3352. static int __init cmp_node_active_region(const void *a, const void *b)
  3353. {
  3354. struct node_active_region *arange = (struct node_active_region *)a;
  3355. struct node_active_region *brange = (struct node_active_region *)b;
  3356. /* Done this way to avoid overflows */
  3357. if (arange->start_pfn > brange->start_pfn)
  3358. return 1;
  3359. if (arange->start_pfn < brange->start_pfn)
  3360. return -1;
  3361. return 0;
  3362. }
  3363. /* sort the node_map by start_pfn */
  3364. static void __init sort_node_map(void)
  3365. {
  3366. sort(early_node_map, (size_t)nr_nodemap_entries,
  3367. sizeof(struct node_active_region),
  3368. cmp_node_active_region, NULL);
  3369. }
  3370. /* Find the lowest pfn for a node */
  3371. static unsigned long __init find_min_pfn_for_node(int nid)
  3372. {
  3373. int i;
  3374. unsigned long min_pfn = ULONG_MAX;
  3375. /* Assuming a sorted map, the first range found has the starting pfn */
  3376. for_each_active_range_index_in_nid(i, nid)
  3377. min_pfn = min(min_pfn, early_node_map[i].start_pfn);
  3378. if (min_pfn == ULONG_MAX) {
  3379. printk(KERN_WARNING
  3380. "Could not find start_pfn for node %d\n", nid);
  3381. return 0;
  3382. }
  3383. return min_pfn;
  3384. }
  3385. /**
  3386. * find_min_pfn_with_active_regions - Find the minimum PFN registered
  3387. *
  3388. * It returns the minimum PFN based on information provided via
  3389. * add_active_range().
  3390. */
  3391. unsigned long __init find_min_pfn_with_active_regions(void)
  3392. {
  3393. return find_min_pfn_for_node(MAX_NUMNODES);
  3394. }
  3395. /*
  3396. * early_calculate_totalpages()
  3397. * Sum pages in active regions for movable zone.
  3398. * Populate N_HIGH_MEMORY for calculating usable_nodes.
  3399. */
  3400. static unsigned long __init early_calculate_totalpages(void)
  3401. {
  3402. int i;
  3403. unsigned long totalpages = 0;
  3404. for (i = 0; i < nr_nodemap_entries; i++) {
  3405. unsigned long pages = early_node_map[i].end_pfn -
  3406. early_node_map[i].start_pfn;
  3407. totalpages += pages;
  3408. if (pages)
  3409. node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
  3410. }
  3411. return totalpages;
  3412. }
  3413. /*
  3414. * Find the PFN the Movable zone begins in each node. Kernel memory
  3415. * is spread evenly between nodes as long as the nodes have enough
  3416. * memory. When they don't, some nodes will have more kernelcore than
  3417. * others
  3418. */
  3419. static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
  3420. {
  3421. int i, nid;
  3422. unsigned long usable_startpfn;
  3423. unsigned long kernelcore_node, kernelcore_remaining;
  3424. unsigned long totalpages = early_calculate_totalpages();
  3425. int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
  3426. /*
3427. * If movablecore was specified, calculate the corresponding
3428. * amount of kernelcore so that memory usable for any allocation
3429. * type is evenly spread. If both kernelcore
  3430. * and movablecore are specified, then the value of kernelcore
  3431. * will be used for required_kernelcore if it's greater than
  3432. * what movablecore would have allowed.
  3433. */
  3434. if (required_movablecore) {
  3435. unsigned long corepages;
  3436. /*
  3437. * Round-up so that ZONE_MOVABLE is at least as large as what
  3438. * was requested by the user
  3439. */
  3440. required_movablecore =
  3441. roundup(required_movablecore, MAX_ORDER_NR_PAGES);
  3442. corepages = totalpages - required_movablecore;
  3443. required_kernelcore = max(required_kernelcore, corepages);
  3444. }
  3445. /* If kernelcore was not specified, there is no ZONE_MOVABLE */
  3446. if (!required_kernelcore)
  3447. return;
  3448. /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
  3449. find_usable_zone_for_movable();
  3450. usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
  3451. restart:
  3452. /* Spread kernelcore memory as evenly as possible throughout nodes */
  3453. kernelcore_node = required_kernelcore / usable_nodes;
  3454. for_each_node_state(nid, N_HIGH_MEMORY) {
  3455. /*
  3456. * Recalculate kernelcore_node if the division per node
  3457. * now exceeds what is necessary to satisfy the requested
  3458. * amount of memory for the kernel
  3459. */
  3460. if (required_kernelcore < kernelcore_node)
  3461. kernelcore_node = required_kernelcore / usable_nodes;
  3462. /*
  3463. * As the map is walked, we track how much memory is usable
  3464. * by the kernel using kernelcore_remaining. When it is
  3465. * 0, the rest of the node is usable by ZONE_MOVABLE
  3466. */
  3467. kernelcore_remaining = kernelcore_node;
  3468. /* Go through each range of PFNs within this node */
  3469. for_each_active_range_index_in_nid(i, nid) {
  3470. unsigned long start_pfn, end_pfn;
  3471. unsigned long size_pages;
  3472. start_pfn = max(early_node_map[i].start_pfn,
  3473. zone_movable_pfn[nid]);
  3474. end_pfn = early_node_map[i].end_pfn;
  3475. if (start_pfn >= end_pfn)
  3476. continue;
  3477. /* Account for what is only usable for kernelcore */
  3478. if (start_pfn < usable_startpfn) {
  3479. unsigned long kernel_pages;
  3480. kernel_pages = min(end_pfn, usable_startpfn)
  3481. - start_pfn;
  3482. kernelcore_remaining -= min(kernel_pages,
  3483. kernelcore_remaining);
  3484. required_kernelcore -= min(kernel_pages,
  3485. required_kernelcore);
  3486. /* Continue if range is now fully accounted */
  3487. if (end_pfn <= usable_startpfn) {
  3488. /*
  3489. * Push zone_movable_pfn to the end so
  3490. * that if we have to rebalance
  3491. * kernelcore across nodes, we will
  3492. * not double account here
  3493. */
  3494. zone_movable_pfn[nid] = end_pfn;
  3495. continue;
  3496. }
  3497. start_pfn = usable_startpfn;
  3498. }
  3499. /*
  3500. * The usable PFN range for ZONE_MOVABLE is from
  3501. * start_pfn->end_pfn. Calculate size_pages as the
  3502. * number of pages used as kernelcore
  3503. */
  3504. size_pages = end_pfn - start_pfn;
  3505. if (size_pages > kernelcore_remaining)
  3506. size_pages = kernelcore_remaining;
  3507. zone_movable_pfn[nid] = start_pfn + size_pages;
  3508. /*
  3509. * Some kernelcore has been met, update counts and
  3510. * break if the kernelcore for this node has been
3511. * satisfied
  3512. */
  3513. required_kernelcore -= min(required_kernelcore,
  3514. size_pages);
  3515. kernelcore_remaining -= size_pages;
  3516. if (!kernelcore_remaining)
  3517. break;
  3518. }
  3519. }
  3520. /*
  3521. * If there is still required_kernelcore, we do another pass with one
  3522. * less node in the count. This will push zone_movable_pfn[nid] further
  3523. * along on the nodes that still have memory until kernelcore is
3524. * satisfied
  3525. */
  3526. usable_nodes--;
  3527. if (usable_nodes && required_kernelcore > usable_nodes)
  3528. goto restart;
  3529. /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
  3530. for (nid = 0; nid < MAX_NUMNODES; nid++)
  3531. zone_movable_pfn[nid] =
  3532. roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
  3533. }
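/*
 * Illustrative example (assumed configuration): on a machine with two nodes
 * of 1 GiB each booted with kernelcore=1G, each node contributes roughly
 * 512 MiB of kernelcore and zone_movable_pfn[nid] ends up about half-way
 * through each node, rounded to MAX_ORDER_NR_PAGES.
 */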
  3534. /* Any regular memory on that node ? */
  3535. static void check_for_regular_memory(pg_data_t *pgdat)
  3536. {
  3537. #ifdef CONFIG_HIGHMEM
  3538. enum zone_type zone_type;
  3539. for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
  3540. struct zone *zone = &pgdat->node_zones[zone_type];
  3541. if (zone->present_pages)
  3542. node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
  3543. }
  3544. #endif
  3545. }
  3546. /**
  3547. * free_area_init_nodes - Initialise all pg_data_t and zone data
  3548. * @max_zone_pfn: an array of max PFNs for each zone
  3549. *
  3550. * This will call free_area_init_node() for each active node in the system.
  3551. * Using the page ranges provided by add_active_range(), the size of each
  3552. * zone in each node and their holes is calculated. If the maximum PFN
  3553. * between two adjacent zones match, it is assumed that the zone is empty.
  3554. * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
  3555. * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
  3556. * starts where the previous one ended. For example, ZONE_DMA32 starts
  3557. * at arch_max_dma_pfn.
  3558. */
  3559. void __init free_area_init_nodes(unsigned long *max_zone_pfn)
  3560. {
  3561. unsigned long nid;
  3562. int i;
  3563. /* Sort early_node_map as initialisation assumes it is sorted */
  3564. sort_node_map();
  3565. /* Record where the zone boundaries are */
  3566. memset(arch_zone_lowest_possible_pfn, 0,
  3567. sizeof(arch_zone_lowest_possible_pfn));
  3568. memset(arch_zone_highest_possible_pfn, 0,
  3569. sizeof(arch_zone_highest_possible_pfn));
  3570. arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
  3571. arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
  3572. for (i = 1; i < MAX_NR_ZONES; i++) {
  3573. if (i == ZONE_MOVABLE)
  3574. continue;
  3575. arch_zone_lowest_possible_pfn[i] =
  3576. arch_zone_highest_possible_pfn[i-1];
  3577. arch_zone_highest_possible_pfn[i] =
  3578. max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
  3579. }
  3580. arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
  3581. arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
  3582. /* Find the PFNs that ZONE_MOVABLE begins at in each node */
  3583. memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
  3584. find_zone_movable_pfns_for_nodes(zone_movable_pfn);
  3585. /* Print out the zone ranges */
  3586. printk("Zone PFN ranges:\n");
  3587. for (i = 0; i < MAX_NR_ZONES; i++) {
  3588. if (i == ZONE_MOVABLE)
  3589. continue;
  3590. printk(" %-8s %0#10lx -> %0#10lx\n",
  3591. zone_names[i],
  3592. arch_zone_lowest_possible_pfn[i],
  3593. arch_zone_highest_possible_pfn[i]);
  3594. }
  3595. /* Print out the PFNs ZONE_MOVABLE begins at in each node */
  3596. printk("Movable zone start PFN for each node\n");
  3597. for (i = 0; i < MAX_NUMNODES; i++) {
  3598. if (zone_movable_pfn[i])
  3599. printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
  3600. }
  3601. /* Print out the early_node_map[] */
  3602. printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
  3603. for (i = 0; i < nr_nodemap_entries; i++)
  3604. printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
  3605. early_node_map[i].start_pfn,
  3606. early_node_map[i].end_pfn);
  3607. /* Initialise every node */
  3608. mminit_verify_pageflags_layout();
  3609. setup_nr_node_ids();
  3610. for_each_online_node(nid) {
  3611. pg_data_t *pgdat = NODE_DATA(nid);
  3612. free_area_init_node(nid, NULL,
  3613. find_min_pfn_for_node(nid), NULL);
  3614. /* Any memory on that node */
  3615. if (pgdat->node_present_pages)
  3616. node_set_state(nid, N_HIGH_MEMORY);
  3617. check_for_regular_memory(pgdat);
  3618. }
  3619. }
  3620. static int __init cmdline_parse_core(char *p, unsigned long *core)
  3621. {
  3622. unsigned long long coremem;
  3623. if (!p)
  3624. return -EINVAL;
  3625. coremem = memparse(p, &p);
  3626. *core = coremem >> PAGE_SHIFT;
  3627. /* Paranoid check that UL is enough for the coremem value */
  3628. WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
  3629. return 0;
  3630. }
  3631. /*
  3632. * kernelcore=size sets the amount of memory for use for allocations that
  3633. * cannot be reclaimed or migrated.
  3634. */
  3635. static int __init cmdline_parse_kernelcore(char *p)
  3636. {
  3637. return cmdline_parse_core(p, &required_kernelcore);
  3638. }
  3639. /*
  3640. * movablecore=size sets the amount of memory for use for allocations that
  3641. * can be reclaimed or migrated.
  3642. */
  3643. static int __init cmdline_parse_movablecore(char *p)
  3644. {
  3645. return cmdline_parse_core(p, &required_movablecore);
  3646. }
  3647. early_param("kernelcore", cmdline_parse_kernelcore);
  3648. early_param("movablecore", cmdline_parse_movablecore);
  3649. #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
  3650. /**
  3651. * set_dma_reserve - set the specified number of pages reserved in the first zone
  3652. * @new_dma_reserve: The number of pages to mark reserved
  3653. *
  3654. * The per-cpu batchsize and zone watermarks are determined by present_pages.
  3655. * In the DMA zone, a significant percentage may be consumed by kernel image
  3656. * and other unfreeable allocations which can skew the watermarks badly. This
  3657. * function may optionally be used to account for unfreeable pages in the
  3658. * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
  3659. * smaller per-cpu batchsize.
  3660. */
  3661. void __init set_dma_reserve(unsigned long new_dma_reserve)
  3662. {
  3663. dma_reserve = new_dma_reserve;
  3664. }
  3665. #ifndef CONFIG_NEED_MULTIPLE_NODES
  3666. struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
  3667. EXPORT_SYMBOL(contig_page_data);
  3668. #endif
  3669. void __init free_area_init(unsigned long *zones_size)
  3670. {
  3671. free_area_init_node(0, zones_size,
  3672. __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
  3673. }
  3674. static int page_alloc_cpu_notify(struct notifier_block *self,
  3675. unsigned long action, void *hcpu)
  3676. {
  3677. int cpu = (unsigned long)hcpu;
  3678. if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
  3679. drain_pages(cpu);
  3680. /*
  3681. * Spill the event counters of the dead processor
  3682. * into the current processors event counters.
  3683. * This artificially elevates the count of the current
  3684. * processor.
  3685. */
  3686. vm_events_fold_cpu(cpu);
  3687. /*
  3688. * Zero the differential counters of the dead processor
  3689. * so that the vm statistics are consistent.
  3690. *
  3691. * This is only okay since the processor is dead and cannot
  3692. * race with what we are doing.
  3693. */
  3694. refresh_cpu_vm_stats(cpu);
  3695. }
  3696. return NOTIFY_OK;
  3697. }
  3698. void __init page_alloc_init(void)
  3699. {
  3700. hotcpu_notifier(page_alloc_cpu_notify, 0);
  3701. }
  3702. /*
  3703. * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
  3704. * or min_free_kbytes changes.
  3705. */
  3706. static void calculate_totalreserve_pages(void)
  3707. {
  3708. struct pglist_data *pgdat;
  3709. unsigned long reserve_pages = 0;
  3710. enum zone_type i, j;
  3711. for_each_online_pgdat(pgdat) {
  3712. for (i = 0; i < MAX_NR_ZONES; i++) {
  3713. struct zone *zone = pgdat->node_zones + i;
  3714. unsigned long max = 0;
  3715. /* Find valid and maximum lowmem_reserve in the zone */
  3716. for (j = i; j < MAX_NR_ZONES; j++) {
  3717. if (zone->lowmem_reserve[j] > max)
  3718. max = zone->lowmem_reserve[j];
  3719. }
  3720. /* we treat pages_high as reserved pages. */
  3721. max += zone->pages_high;
  3722. if (max > zone->present_pages)
  3723. max = zone->present_pages;
  3724. reserve_pages += max;
  3725. }
  3726. }
  3727. totalreserve_pages = reserve_pages;
  3728. }
  3729. /*
  3730. * setup_per_zone_lowmem_reserve - called whenever
  3731. * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone
  3732. * has a correct pages reserved value, so an adequate number of
  3733. * pages are left in the zone after a successful __alloc_pages().
  3734. */
  3735. static void setup_per_zone_lowmem_reserve(void)
  3736. {
  3737. struct pglist_data *pgdat;
  3738. enum zone_type j, idx;
  3739. for_each_online_pgdat(pgdat) {
  3740. for (j = 0; j < MAX_NR_ZONES; j++) {
  3741. struct zone *zone = pgdat->node_zones + j;
  3742. unsigned long present_pages = zone->present_pages;
  3743. zone->lowmem_reserve[j] = 0;
  3744. idx = j;
  3745. while (idx) {
  3746. struct zone *lower_zone;
  3747. idx--;
  3748. if (sysctl_lowmem_reserve_ratio[idx] < 1)
  3749. sysctl_lowmem_reserve_ratio[idx] = 1;
  3750. lower_zone = pgdat->node_zones + idx;
  3751. lower_zone->lowmem_reserve[j] = present_pages /
  3752. sysctl_lowmem_reserve_ratio[idx];
  3753. present_pages += lower_zone->present_pages;
  3754. }
  3755. }
  3756. }
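/*
 * Illustrative example (assumes the default lowmem_reserve_ratio of 32 for
 * ZONE_NORMAL): if 1,048,576 pages of ZONE_HIGHMEM sit above ZONE_NORMAL,
 * then ZONE_NORMAL->lowmem_reserve[ZONE_HIGHMEM] becomes 32768 pages, i.e.
 * highmem-capable allocations falling back to ZONE_NORMAL must leave roughly
 * that many pages free there.
 */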
  3757. /* update totalreserve_pages */
  3758. calculate_totalreserve_pages();
  3759. }
  3760. /**
  3761. * setup_per_zone_pages_min - called when min_free_kbytes changes.
  3762. *
  3763. * Ensures that the pages_{min,low,high} values for each zone are set correctly
  3764. * with respect to min_free_kbytes.
  3765. */
  3766. void setup_per_zone_pages_min(void)
  3767. {
  3768. unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
  3769. unsigned long lowmem_pages = 0;
  3770. struct zone *zone;
  3771. unsigned long flags;
  3772. /* Calculate total number of !ZONE_HIGHMEM pages */
  3773. for_each_zone(zone) {
  3774. if (!is_highmem(zone))
  3775. lowmem_pages += zone->present_pages;
  3776. }
  3777. for_each_zone(zone) {
  3778. u64 tmp;
  3779. spin_lock_irqsave(&zone->lock, flags);
  3780. tmp = (u64)pages_min * zone->present_pages;
  3781. do_div(tmp, lowmem_pages);
  3782. if (is_highmem(zone)) {
  3783. /*
  3784. * __GFP_HIGH and PF_MEMALLOC allocations usually don't
  3785. * need highmem pages, so cap pages_min to a small
  3786. * value here.
  3787. *
  3788. * The (pages_high-pages_low) and (pages_low-pages_min)
3789. * deltas control asynchronous page reclaim, and so should
  3790. * not be capped for highmem.
  3791. */
  3792. int min_pages;
  3793. min_pages = zone->present_pages / 1024;
  3794. if (min_pages < SWAP_CLUSTER_MAX)
  3795. min_pages = SWAP_CLUSTER_MAX;
  3796. if (min_pages > 128)
  3797. min_pages = 128;
  3798. zone->pages_min = min_pages;
  3799. } else {
  3800. /*
  3801. * If it's a lowmem zone, reserve a number of pages
  3802. * proportionate to the zone's size.
  3803. */
  3804. zone->pages_min = tmp;
  3805. }
  3806. zone->pages_low = zone->pages_min + (tmp >> 2);
  3807. zone->pages_high = zone->pages_min + (tmp >> 1);
  3808. setup_zone_migrate_reserve(zone);
  3809. spin_unlock_irqrestore(&zone->lock, flags);
  3810. }
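/*
 * Worked example (illustrative, assumes 4 KiB pages): with
 * min_free_kbytes = 4096, pages_min totals 1024 pages.  A lowmem zone
 * holding half of all lowmem gets pages_min = 512, pages_low = 640 and
 * pages_high = 768.
 */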
  3811. /* update totalreserve_pages */
  3812. calculate_totalreserve_pages();
  3813. }
  3814. /**
  3815. * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
  3816. *
  3817. * The inactive anon list should be small enough that the VM never has to
  3818. * do too much work, but large enough that each inactive page has a chance
  3819. * to be referenced again before it is swapped out.
  3820. *
  3821. * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
  3822. * INACTIVE_ANON pages on this zone's LRU, maintained by the
  3823. * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
  3824. * the anonymous pages are kept on the inactive list.
  3825. *
3826. *   total     target   max
3827. *   memory    ratio    inactive anon
3828. * -------------------------------------
3829. *   10MB        1      5MB
3830. *   100MB       1      50MB
3831. *   1GB         3      250MB
3832. *   10GB       10      0.9GB
3833. *   100GB      31      3GB
3834. *   1TB       101      10GB
3835. *   10TB      320      32GB
  3836. */
  3837. static void setup_per_zone_inactive_ratio(void)
  3838. {
  3839. struct zone *zone;
  3840. for_each_zone(zone) {
  3841. unsigned int gb, ratio;
  3842. /* Zone size in gigabytes */
  3843. gb = zone->present_pages >> (30 - PAGE_SHIFT);
  3844. ratio = int_sqrt(10 * gb);
  3845. if (!ratio)
  3846. ratio = 1;
  3847. zone->inactive_ratio = ratio;
  3848. }
  3849. }
  3850. /*
  3851. * Initialise min_free_kbytes.
  3852. *
  3853. * For small machines we want it small (128k min). For large machines
  3854. * we want it large (64MB max). But it is not linear, because network
  3855. * bandwidth does not increase linearly with machine size. We use
  3856. *
  3857. * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
  3858. * min_free_kbytes = sqrt(lowmem_kbytes * 16)
  3859. *
  3860. * which yields
  3861. *
  3862. * 16MB: 512k
  3863. * 32MB: 724k
  3864. * 64MB: 1024k
  3865. * 128MB: 1448k
  3866. * 256MB: 2048k
  3867. * 512MB: 2896k
  3868. * 1024MB: 4096k
  3869. * 2048MB: 5792k
  3870. * 4096MB: 8192k
  3871. * 8192MB: 11584k
  3872. * 16384MB: 16384k
  3873. */
  3874. static int __init init_per_zone_pages_min(void)
  3875. {
  3876. unsigned long lowmem_kbytes;
  3877. lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
  3878. min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
  3879. if (min_free_kbytes < 128)
  3880. min_free_kbytes = 128;
  3881. if (min_free_kbytes > 65536)
  3882. min_free_kbytes = 65536;
  3883. setup_per_zone_pages_min();
  3884. setup_per_zone_lowmem_reserve();
  3885. setup_per_zone_inactive_ratio();
  3886. return 0;
  3887. }
  3888. module_init(init_per_zone_pages_min)
  3889. /*
  3890. * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
3891. * that we can recalculate the per-zone watermarks whenever min_free_kbytes
  3892. * changes.
  3893. */
  3894. int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
  3895. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  3896. {
  3897. proc_dointvec(table, write, file, buffer, length, ppos);
  3898. if (write)
  3899. setup_per_zone_pages_min();
  3900. return 0;
  3901. }
  3902. #ifdef CONFIG_NUMA
  3903. int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
  3904. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  3905. {
  3906. struct zone *zone;
  3907. int rc;
  3908. rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
  3909. if (rc)
  3910. return rc;
  3911. for_each_zone(zone)
  3912. zone->min_unmapped_pages = (zone->present_pages *
  3913. sysctl_min_unmapped_ratio) / 100;
  3914. return 0;
  3915. }
  3916. int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
  3917. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  3918. {
  3919. struct zone *zone;
  3920. int rc;
  3921. rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
  3922. if (rc)
  3923. return rc;
  3924. for_each_zone(zone)
  3925. zone->min_slab_pages = (zone->present_pages *
  3926. sysctl_min_slab_ratio) / 100;
  3927. return 0;
  3928. }
  3929. #endif
  3930. /*
  3931. * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
  3932. * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
  3933. * whenever sysctl_lowmem_reserve_ratio changes.
  3934. *
  3935. * The reserve ratio obviously has absolutely no relation with the
3936. * pages_min watermarks. The lowmem reserve ratio only makes sense
3937. * as a function of the boot-time zone sizes.
  3938. */
  3939. int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
  3940. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  3941. {
  3942. proc_dointvec_minmax(table, write, file, buffer, length, ppos);
  3943. setup_per_zone_lowmem_reserve();
  3944. return 0;
  3945. }
  3946. /*
  3947. * percpu_pagelist_fraction - changes the pcp->high for each zone on each
  3948. * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist
3949. * can have before it gets flushed back to the buddy allocator.
  3950. */
  3951. int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
  3952. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  3953. {
  3954. struct zone *zone;
  3955. unsigned int cpu;
  3956. int ret;
  3957. ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
  3958. if (!write || (ret == -EINVAL))
  3959. return ret;
  3960. for_each_zone(zone) {
  3961. for_each_online_cpu(cpu) {
  3962. unsigned long high;
  3963. high = zone->present_pages / percpu_pagelist_fraction;
  3964. setup_pagelist_highmark(zone_pcp(zone, cpu), high);
  3965. }
  3966. }
  3967. return 0;
  3968. }
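/*
 * Example (illustrative): writing 8 to /proc/sys/vm/percpu_pagelist_fraction
 * sets each zone's pcp->high to 1/8th of the zone's present pages, with
 * pcp->batch derived from it by setup_pagelist_highmark() above.
 */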
int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif

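/*
 * Illustrative sketch (not part of this file): the __setup() hook above
 * receives the text after "hashdist=" on the kernel command line, so booting
 * with "hashdist=0" stores 0 and keeps the large system hashes out of the
 * __vmalloc() path in alloc_large_system_hash() below. The conversion is the
 * same base-autodetecting parse that strtoul() performs in userspace:
 *
 *	#include <stdlib.h>
 *
 *	static int parse_hashdist(const char *arg)	// hypothetical helper
 *	{
 *		return (int)strtoul(arg, NULL, 0);	// accepts "0", "1", "0x1", ...
 *	}
 */
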
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long limit)
{
	unsigned long long max = limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
		numentries >>= 20 - PAGE_SHIFT;
		numentries <<= 20 - PAGE_SHIFT;

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}

	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = alloc_bootmem_nopanic(size);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			unsigned long order = get_order(size);

			if (order < MAX_ORDER)
				table = (void *)__get_free_pages(GFP_ATOMIC,
								order);
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of hash table.
			 */
			if (table) {
				unsigned long alloc_end = (unsigned long)table +
						(PAGE_SIZE << order);
				unsigned long used = (unsigned long)table +
						PAGE_ALIGN(size);
				split_page(virt_to_page(table), order);
				while (used < alloc_end) {
					free_page(used);
					used += PAGE_SIZE;
				}
			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
	       tablename,
	       (1U << log2qty),
	       ilog2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	/*
	 * If hashdist is set, the table allocation is done with __vmalloc()
	 * which invokes the kmemleak_alloc() callback. This function may also
	 * be called before the slab and kmemleak are initialised when
	 * kmemleak simply buffers the request to be executed later
	 * (GFP_ATOMIC flag ignored in this case).
	 */
	if (!hashdist)
		kmemleak_alloc(table, size, 1, GFP_ATOMIC);

	return table;
}

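/*
 * Worked sizing example (illustrative; the numbers below are assumptions,
 * not taken from any particular caller): with nr_kernel_pages = 262144
 * (1GB of 4KB pages), scale = 14 and bucketsize = 8, the code above computes
 * 262144 >> (14 - 12) = 65536 entries, which is already a power of two, so
 * log2qty = 16 and size = 8 << 16 = 524288 bytes, i.e. an order-7 page
 * allocation unless HASH_EARLY or hashdist selects another path.
 *
 *	unsigned long numentries = 262144 >> (14 - 12);	// 65536 buckets
 *	unsigned long log2qty = 16;			// ilog2(65536)
 *	unsigned long size = 8UL << log2qty;		// 524288 bytes
 */
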
/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - zone->zone_start_pfn;
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

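/*
 * Worked example (illustrative; pageblock_order = 9 and
 * NR_PAGEBLOCK_BITS = 3 are assumptions for a typical configuration where
 * three bits per block hold the migrate type): a pfn that lies 5120 pages
 * into its zone (or section) is in pageblock 5120 >> 9 = 10, so its flag
 * group starts at bit 10 * 3 = 30 of the pageblock bitmap.
 *
 *	unsigned long pfn_offset = 5120;	// hypothetical offset into the zone
 *	int bitidx = (pfn_offset >> 9) * 3;	// 30
 */
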
/**
 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest to retrieve
 * @end_bitidx: The last bit of interest
 * returns pageblock_bits flags
 */
unsigned long get_pageblock_flags_group(struct page *page,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long flags = 0;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (test_bit(bitidx + start_bitidx, bitmap))
			flags |= value;

	return flags;
}

/**
 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest
 * @end_bitidx: The last bit of interest
 * @flags: The flags to set
 */
void set_pageblock_flags_group(struct page *page, unsigned long flags,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	VM_BUG_ON(pfn < zone->zone_start_pfn);
	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (flags & value)
			__set_bit(bitidx + start_bitidx, bitmap);
		else
			__clear_bit(bitidx + start_bitidx, bitmap);
}

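/*
 * Usage sketch (how callers such as the migratetype accessors are expected
 * to use the two functions above; PB_migrate and PB_migrate_end come from
 * include/linux/pageblock-flags.h):
 *
 *	// read the migrate type stored for a page's pageblock
 *	int mt = get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
 *
 *	// store a new migrate type for the whole pageblock
 *	set_pageblock_flags_group(page, (unsigned long)MIGRATE_MOVABLE,
 *				  PB_migrate, PB_migrate_end);
 */
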
/*
 * This is designed as a helper function; please see page_isolation.c as
 * well. It sets/clears a pageblock's type to/from ISOLATE; the page
 * allocator never allocates memory from an ISOLATE block.
 */
int set_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags;
	int ret = -EBUSY;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	/*
	 * In the future, more migrate types will be able to be isolation
	 * targets.
	 */
	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		goto out;
	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
	move_freepages_block(zone, page, MIGRATE_ISOLATE);
	ret = 0;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}

void unset_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
	move_freepages_block(zone, page, MIGRATE_MOVABLE);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

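/*
 * Usage sketch (modelled on start_isolate_page_range()/
 * undo_isolate_page_range() in mm/page_isolation.c; simplified and without
 * the pfn_valid checks): callers isolate a pageblock-aligned range one block
 * at a time and roll back the blocks done so far if any block refuses.
 *
 *	unsigned long pfn;
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
 *		if (set_migratetype_isolate(pfn_to_page(pfn)))
 *			goto undo;	// block was not MIGRATE_MOVABLE
 *	return 0;
 * undo:
 *	for (; pfn > start_pfn; pfn -= pageblock_nr_pages)
 *		unset_migratetype_isolate(pfn_to_page(pfn - pageblock_nr_pages));
 *	return -EBUSY;
 */
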
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	int order, i;
	unsigned long pfn;
	unsigned long flags;

	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		printk(KERN_INFO "remove from free list %lx %d %lx\n",
		       pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES,
				      - (1UL << order));
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif