/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);        /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
        [N_POSSIBLE] = NODE_MASK_ALL,
        [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
        [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
        [N_CPU] = { { [0] = 1UL } },
#endif  /* NUMA */
};
EXPORT_SYMBOL(node_states);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        if (saved_gfp_mask) {
                gfp_allowed_mask = saved_gfp_mask;
                saved_gfp_mask = 0;
        }
}

void pm_restrict_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        WARN_ON(saved_gfp_mask);
        saved_gfp_mask = gfp_allowed_mask;
        gfp_allowed_mask &= ~GFP_IOFS;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);
/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *      1G machine -> (16M dma, 784M normal, 224M high)
 *      NORMAL allocation will leave 784M/256 of ram reserved in ZONE_DMA
 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
        256,
#endif
#ifdef CONFIG_ZONE_DMA32
        256,
#endif
#ifdef CONFIG_HIGHMEM
        32,
#endif
        32,
};
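
/*
 * Worked example (editor's illustration, not from the original source):
 * assume ZONE_NORMAL spans roughly 784M and the ratio for ZONE_DMA is the
 * default 256. For an allocation that targets ZONE_NORMAL but falls back
 * to ZONE_DMA, the lower zone must keep
 *
 *      784M / 256 ~= 3M
 *
 * of its pages free, i.e. the DMA zone's watermark check is raised by
 * that many pages, so scarce low memory is not consumed by allocations
 * that could have been satisfied from a higher zone.
 */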
EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
        "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
        "DMA32",
#endif
        "Normal",
#ifdef CONFIG_HIGHMEM
        "HighMem",
#endif
        "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
        if (unlikely(page_group_by_mobility_disabled))
                migratetype = MIGRATE_UNMOVABLE;

        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
        int ret = 0;
        unsigned seq;
        unsigned long pfn = page_to_pfn(page);

        do {
                seq = zone_span_seqbegin(zone);
                if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
                        ret = 1;
                else if (pfn < zone->zone_start_pfn)
                        ret = 1;
        } while (zone_span_seqretry(zone, seq));

        return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
        if (!pfn_valid_within(page_to_pfn(page)))
                return 0;
        if (zone != page_zone(page))
                return 0;

        return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
        if (page_outside_zone_boundaries(zone, page))
                return 1;
        if (!page_is_consistent(zone, page))
                return 1;

        return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
        return 0;
}
#endif
static void bad_page(struct page *page)
{
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /* Don't complain about poisoned pages */
        if (PageHWPoison(page)) {
                reset_page_mapcount(page); /* remove PageBuddy */
                return;
        }

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        goto out;
                }
                if (nr_unshown) {
                        printk(KERN_ALERT
                              "BUG: Bad page state: %lu messages suppressed\n",
                                nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
                current->comm, page_to_pfn(page));
        dump_page(page);

        print_modules();
        dump_stack();
out:
        /* Leave bad fields for debug, except PageBuddy could make trouble */
        reset_page_mapcount(page); /* remove PageBuddy */
        add_taint(TAINT_BAD_PAGE);
}
/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All tail pages have their ->first_page
 * pointing at the head page.
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
        __free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;

        set_compound_page_dtor(page, free_compound_page);
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
                __SetPageTail(p);
                set_page_count(p, 0);
                p->first_page = page;
        }
}
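
/*
 * Worked example (editor's illustration, not from the original source):
 * after prep_compound_page(page, 2), the four struct pages look like:
 *
 *      page[0]: PG_head set, head of the order-2 compound page
 *      page[1]: PG_tail set, first_page = &page[0],
 *               lru.next = free_compound_page, lru.prev = 2
 *      page[2]: PG_tail set, first_page = &page[0]
 *      page[3]: PG_tail set, first_page = &page[0]
 *
 * Tail pages also have their reference counts set to 0. Since the
 * destructor and order live in the first tail page, an order-0 page
 * cannot be compound: it has no tail page to hold them.
 */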
/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        int bad = 0;

        if (unlikely(compound_order(page) != order) ||
            unlikely(!PageHead(page))) {
                bad_page(page);
                bad++;
        }

        __ClearPageHead(page);

        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                if (unlikely(!PageTail(p) || (p->first_page != page))) {
                        bad_page(page);
                        bad++;
                }
                __ClearPageTail(p);
        }

        return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        /*
         * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
         * and __GFP_HIGHMEM from hard or soft interrupt context.
         */
        VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
        for (i = 0; i < (1 << order); i++)
                clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
        set_page_private(page, order);
        __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
        __ClearPageBuddy(page);
        set_page_private(page, 0);
}
/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *      B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (B1) is #8 its order
 * 1 buddy is #10:
 *      B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *      P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
        return page_idx ^ (1 << order);
}
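
/*
 * Worked example (editor's illustration, not from the original source):
 * the XOR means the buddy can lie on either side of the page. For
 * page_idx = 12 at order 2, buddy_idx = 12 ^ (1 << 2) = 8, so the buddy
 * starts four pages *before* the page, and callers recover its struct
 * page with
 *
 *      buddy = page + (buddy_idx - page_idx);  (here: page - 4)
 *
 * Their common order-3 parent is 12 & ~(1 << 2) = 8.
 */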
/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount to -2.
 * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                                                int order)
{
        if (!pfn_valid_within(page_to_pfn(buddy)))
                return 0;

        if (page_zone_id(page) != page_zone_id(buddy))
                return 0;

        if (PageBuddy(buddy) && page_order(buddy) == order) {
                VM_BUG_ON(page_count(buddy) != 0);
                return 1;
        }
        return 0;
}
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length of (1 << order) and marked with _mapcount -2. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */
static inline void __free_one_page(struct page *page,
                struct zone *zone, unsigned int order,
                int migratetype)
{
        unsigned long page_idx;
        unsigned long combined_idx;
        unsigned long uninitialized_var(buddy_idx);
        struct page *buddy;

        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
                        return;

        VM_BUG_ON(migratetype == -1);

        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

        VM_BUG_ON(page_idx & ((1 << order) - 1));
        VM_BUG_ON(bad_range(zone, page));

        while (order < MAX_ORDER-1) {
                buddy_idx = __find_buddy_index(page_idx, order);
                buddy = page + (buddy_idx - page_idx);
                if (!page_is_buddy(page, buddy, order))
                        break;

                /* Our buddy is free, merge with it and move up one order. */
                list_del(&buddy->lru);
                zone->free_area[order].nr_free--;
                rmv_page_order(buddy);
                combined_idx = buddy_idx & page_idx;
                page = page + (combined_idx - page_idx);
                page_idx = combined_idx;
                order++;
        }
        set_page_order(page, order);

        /*
         * If this is not the largest possible page, check if the buddy
         * of the next-highest order is free. If it is, it's possible
         * that pages are being freed that will coalesce soon. If that
         * is happening, add the free page to the tail of the list
         * so it's less likely to be used soon and more likely to be merged
         * as a higher order page.
         */
        if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
                struct page *higher_page, *higher_buddy;
                combined_idx = buddy_idx & page_idx;
                higher_page = page + (combined_idx - page_idx);
                buddy_idx = __find_buddy_index(combined_idx, order + 1);
                higher_buddy = page + (buddy_idx - combined_idx);
                if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
                        list_add_tail(&page->lru,
                                &zone->free_area[order].free_list[migratetype]);
                        goto out;
                }
        }

        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
        zone->free_area[order].nr_free++;
}
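
/*
 * Worked example (editor's illustration, not from the original source):
 * freeing an order-0 page at page_idx 5 while 4, 6 and 7 are already free:
 *
 *      order 0: buddy_idx = 5 ^ 1 = 4, free -> combined_idx = 5 & 4 = 4
 *      order 1: buddy_idx = 4 ^ 2 = 6, free -> combined_idx = 4 & 6 = 4
 *      order 2: buddy_idx = 4 ^ 4 = 0, block 0-3 not free -> stop
 *
 * The loop leaves a single order-2 block at index 4 on the free list,
 * having pulled the order-0 buddy (4) and the order-1 buddy (6-7) off
 * their lists along the way.
 */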
/*
 * free_page_mlock() -- clean up attempts to free an mlocked() page.
 * Page should not be on lru, so no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
        __dec_zone_page_state(page, NR_MLOCK);
        __count_vm_event(UNEVICTABLE_MLOCKFREED);
}

static inline int free_pages_check(struct page *page)
{
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (atomic_read(&page->_count) != 0) |
                (page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
                (mem_cgroup_bad_page_check(page)))) {
                bad_page(page);
                return 1;
        }
        if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
                page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        return 0;
}
/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
                                        struct per_cpu_pages *pcp)
{
        int migratetype = 0;
        int batch_free = 0;
        int to_free = count;

        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;

        while (to_free) {
                struct page *page;
                struct list_head *list;

                /*
                 * Remove pages from lists in a round-robin fashion. A
                 * batch_free count is maintained that is incremented when an
                 * empty list is encountered.  This is so more pages are freed
                 * off fuller lists instead of spinning excessively around empty
                 * lists
                 */
                do {
                        batch_free++;
                        if (++migratetype == MIGRATE_PCPTYPES)
                                migratetype = 0;
                        list = &pcp->lists[migratetype];
                } while (list_empty(list));

                /* This is the only non-empty list. Free them all. */
                if (batch_free == MIGRATE_PCPTYPES)
                        batch_free = to_free;

                do {
                        page = list_entry(list->prev, struct page, lru);
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, zone, 0, page_private(page));
                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
                } while (--to_free && --batch_free && !list_empty(list));
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        spin_unlock(&zone->lock);
}
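
/*
 * Worked example (editor's illustration, not from the original source):
 * with MIGRATE_PCPTYPES == 3, suppose the UNMOVABLE list is empty while
 * RECLAIMABLE and MOVABLE both hold pages. The scan settles into a cycle:
 * passing the empty UNMOVABLE slot bumps batch_free to 2 before the
 * RECLAIMABLE list is reached, so two pages are freed from RECLAIMABLE
 * for every one freed from MOVABLE. Once only one list remains non-empty,
 * batch_free reaches MIGRATE_PCPTYPES and the rest of to_free is taken
 * from that list in one go.
 */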
static void free_one_page(struct zone *zone, struct page *page, int order,
                                int migratetype)
{
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;

        __free_one_page(page, zone, order, migratetype);
        __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
        int i;
        int bad = 0;

        trace_mm_page_free(page, order);
        kmemcheck_free_shadow(page, order);

        if (PageAnon(page))
                page->mapping = NULL;
        for (i = 0; i < (1 << order); i++)
                bad += free_pages_check(page + i);
        if (bad)
                return false;

        if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page),
                                           PAGE_SIZE << order);
                debug_check_no_obj_freed(page_address(page),
                                           PAGE_SIZE << order);
        }
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);

        return true;
}
static void __free_pages_ok(struct page *page, unsigned int order)
{
        unsigned long flags;
        int wasMlocked = __TestClearPageMlocked(page);

        if (!free_pages_prepare(page, order))
                return;

        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order,
                                        get_pageblock_migratetype(page));
        local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
        if (order == 0) {
                __ClearPageReserved(page);
                set_page_count(page, 0);
                set_page_refcounted(page);
                __free_page(page);
        } else {
                int loop;

                prefetchw(page);
                for (loop = 0; loop < (1 << order); loop++) {
                        struct page *p = &page[loop];

                        if (loop + 1 < (1 << order))
                                prefetchw(p + 1);
                        __ClearPageReserved(p);
                        set_page_count(p, 0);
                }

                set_page_refcounted(page);
                __free_pages(page, order);
        }
}
/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
        int low, int high, struct free_area *area,
        int migratetype)
{
        unsigned long size = 1 << high;

        while (high > low) {
                area--;
                high--;
                size >>= 1;
                VM_BUG_ON(bad_range(zone, &page[size]));
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
        }
}
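
/*
 * Worked example (editor's illustration, not from the original source):
 * satisfying an order-0 request (low = 0) from an order-3 block
 * (high = 3, 8 pages) returns the unused halves, largest first:
 *
 *      page[4..7] -> order-2 free list
 *      page[2..3] -> order-1 free list
 *      page[1]    -> order-0 free list
 *
 * leaving page[0] for the caller. The upper half is always the piece
 * handed back, so the allocated page keeps the lowest address in the
 * original block.
 */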
/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (atomic_read(&page->_count) != 0)  |
                (page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
                (mem_cgroup_bad_page_check(page)))) {
                bad_page(page);
                return 1;
        }
        return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        for (i = 0; i < (1 << order); i++) {
                struct page *p = page + i;
                if (unlikely(check_new_page(p)))
                        return 1;
        }

        set_page_private(page, 0);
        set_page_refcounted(page);

        arch_alloc_page(page, order);
        kernel_map_pages(page, 1 << order, 1);

        if (gfp_flags & __GFP_ZERO)
                prep_zero_page(page, order, gfp_flags);

        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);

        return 0;
}
/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        unsigned int current_order;
        struct free_area * area;
        struct page *page;

        /* Find a page of the appropriate size in the preferred list */
        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
                area = &(zone->free_area[current_order]);
                if (list_empty(&area->free_list[migratetype]))
                        continue;

                page = list_entry(area->free_list[migratetype].next,
                                                        struct page, lru);
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
                expand(zone, page, order, current_order, area, migratetype);
                return page;
        }

        return NULL;
}
/*
 * This array describes the order in which other migrate types are
 * tried when the free lists for the desired migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
        [MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};
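
/*
 * Worked example (editor's illustration, not from the original source):
 * a MIGRATE_UNMOVABLE request whose own free lists are empty is steered
 * first to MIGRATE_RECLAIMABLE, then to MIGRATE_MOVABLE. MIGRATE_RESERVE
 * appears only as a terminator: __rmqueue_fallback() skips it explicitly,
 * and __rmqueue() retries with the reserve lists itself as a last resort.
 */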
/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
                          struct page *start_page, struct page *end_page,
                          int migratetype)
{
        struct page *page;
        unsigned long order;
        int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
        /*
         * page_zone is not safe to call in this context when
         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
         * anyway as we check zone boundaries in move_freepages_block().
         * Remove at a later date when no bug reports exist related to
         * grouping pages by mobility
         */
        BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

        for (page = start_page; page <= end_page;) {
                /* Make sure we are not inadvertently changing nodes */
                VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }

                if (!PageBuddy(page)) {
                        page++;
                        continue;
                }

                order = page_order(page);
                list_move(&page->lru,
                          &zone->free_area[order].free_list[migratetype]);
                page += 1 << order;
                pages_moved += 1 << order;
        }

        return pages_moved;
}
static int move_freepages_block(struct zone *zone, struct page *page,
                                int migratetype)
{
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;

        start_pfn = page_to_pfn(page);
        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
        start_page = pfn_to_page(start_pfn);
        end_page = start_page + pageblock_nr_pages - 1;
        end_pfn = start_pfn + pageblock_nr_pages - 1;

        /* Do not cross zone boundaries */
        if (start_pfn < zone->zone_start_pfn)
                start_page = page;
        if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
                return 0;

        return move_freepages(zone, start_page, end_page, migratetype);
}
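
/*
 * Worked example (editor's illustration; assumes pageblock_order == 9
 * and 4K pages, which is configuration-dependent): pageblock_nr_pages
 * is then 512, so for a page at pfn 1234:
 *
 *      start_pfn = 1234 & ~511 = 1024
 *      end_pfn   = 1024 + 512 - 1 = 1535
 *
 * i.e. the whole 2MB-aligned block containing the page is moved, unless
 * doing so would cross the zone's boundaries.
 */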
static void change_pageblock_range(struct page *pageblock_page,
                                        int start_order, int migratetype)
{
        int nr_pageblocks = 1 << (start_order - pageblock_order);

        while (nr_pageblocks--) {
                set_pageblock_migratetype(pageblock_page, migratetype);
                pageblock_page += pageblock_nr_pages;
        }
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
        struct free_area * area;
        int current_order;
        struct page *page;
        int migratetype, i;

        /* Find the largest possible block of pages in the other list */
        for (current_order = MAX_ORDER-1; current_order >= order;
                                                --current_order) {
                for (i = 0; i < MIGRATE_TYPES - 1; i++) {
                        migratetype = fallbacks[start_migratetype][i];

                        /* MIGRATE_RESERVE handled later if necessary */
                        if (migratetype == MIGRATE_RESERVE)
                                continue;

                        area = &(zone->free_area[current_order]);
                        if (list_empty(&area->free_list[migratetype]))
                                continue;

                        page = list_entry(area->free_list[migratetype].next,
                                        struct page, lru);
                        area->nr_free--;

                        /*
                         * If breaking a large block of pages, move all free
                         * pages to the preferred allocation list. If falling
                         * back for a reclaimable kernel allocation, be more
                         * aggressive about taking ownership of free pages
                         */
                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
                                        start_migratetype == MIGRATE_RECLAIMABLE ||
                                        page_group_by_mobility_disabled) {
                                unsigned long pages;
                                pages = move_freepages_block(zone, page,
                                                        start_migratetype);

                                /* Claim the whole block if over half of it is free */
                                if (pages >= (1 << (pageblock_order-1)) ||
                                                page_group_by_mobility_disabled)
                                        set_pageblock_migratetype(page,
                                                        start_migratetype);

                                migratetype = start_migratetype;
                        }

                        /* Remove the page from the freelists */
                        list_del(&page->lru);
                        rmv_page_order(page);

                        /* Take ownership for orders >= pageblock_order */
                        if (current_order >= pageblock_order)
                                change_pageblock_range(page, current_order,
                                                        start_migratetype);

                        expand(zone, page, order, current_order, area, migratetype);

                        trace_mm_page_alloc_extfrag(page, order, current_order,
                                start_migratetype, migratetype);

                        return page;
                }
        }

        return NULL;
}
/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        struct page *page;

retry_reserve:
        page = __rmqueue_smallest(zone, order, migratetype);

        if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
                page = __rmqueue_fallback(zone, order, migratetype);

                /*
                 * Use MIGRATE_RESERVE rather than fail an allocation. goto
                 * is used because __rmqueue_smallest is an inline function
                 * and we want just one call site
                 */
                if (!page) {
                        migratetype = MIGRATE_RESERVE;
                        goto retry_reserve;
                }
        }

        trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
}
/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
                        int migratetype, int cold)
{
        int i;

        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype);
                if (unlikely(page == NULL))
                        break;

                /*
                 * Split buddy pages returned by expand() are received here
                 * in physical page order. The page is added to the caller's
                 * list and the list head then moves forward. From the
                 * caller's perspective, the linked list is ordered by page
                 * number in some conditions. This is useful for IO devices
                 * that can merge IO requests if the physical pages are
                 * ordered properly.
                 */
                if (likely(cold == 0))
                        list_add(&page->lru, list);
                else
                        list_add_tail(&page->lru, list);
                set_page_private(page, migratetype);
                list = &page->lru;
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
        spin_unlock(&zone->lock);
        return i;
}
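
/*
 * Worked example (editor's illustration, not from the original source):
 * because "list" is advanced to the page just added, hot (cold == 0)
 * allocations of pages A, B, C build head -> A -> B -> C, rather than
 * the head -> C -> B -> A that repeated list_add() at a fixed head
 * would produce, preserving the physical ordering for consumers.
 */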
#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
        unsigned long flags;
        int to_drain;

        local_irq_save(flags);
        if (pcp->count >= pcp->batch)
                to_drain = pcp->batch;
        else
                to_drain = pcp->count;
        free_pcppages_bulk(zone, to_drain, pcp);
        pcp->count -= to_drain;
        local_irq_restore(flags);
}
#endif
/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
        unsigned long flags;
        struct zone *zone;

        for_each_populated_zone(zone) {
                struct per_cpu_pageset *pset;
                struct per_cpu_pages *pcp;

                local_irq_save(flags);
                pset = per_cpu_ptr(zone->pageset, cpu);

                pcp = &pset->pcp;
                if (pcp->count) {
                        free_pcppages_bulk(zone, pcp->count, pcp);
                        pcp->count = 0;
                }
                local_irq_restore(flags);
        }
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
        drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
        on_each_cpu(drain_local_pages, NULL, 1);
}
#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
        unsigned long pfn, max_zone_pfn;
        unsigned long flags;
        int order, t;
        struct list_head *curr;

        if (!zone->spanned_pages)
                return;

        spin_lock_irqsave(&zone->lock, flags);

        max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);

                        if (!swsusp_page_is_forbidden(page))
                                swsusp_unset_page_free(page);
                }

        for_each_migratetype_order(order, t) {
                list_for_each(curr, &zone->free_area[order].free_list[t]) {
                        unsigned long i;

                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
                        for (i = 0; i < (1UL << order); i++)
                                swsusp_set_page_free(pfn_to_page(pfn + i));
                }
        }
        spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;
	int migratetype;
	int wasMlocked = __TestClearPageMlocked(page);

	if (!free_pages_prepare(page, 0))
		return;

	migratetype = get_pageblock_migratetype(page);
	set_page_private(page, migratetype);
	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_event(PGFREE);

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat RESERVE as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator.
	 */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
			free_one_page(zone, page, 0, migratetype);
			goto out;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	if (cold)
		list_add_tail(&page->lru, &pcp->lists[migratetype]);
	else
		list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pcppages_bulk(zone, pcp->batch, pcp);
		pcp->count -= pcp->batch;
	}

out:
	local_irq_restore(flags);
}

/*
 * Free a list of 0-order pages
 */
void free_hot_cold_page_list(struct list_head *list, int cold)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		trace_mm_page_free_batched(page, cold);
		free_hot_cold_page(page, cold);
	}
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1 << order) sub-pages: page[0..n-1].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
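
/*
 * Usage sketch (editor's illustration, assuming a sleepable context;
 * not part of the original file): a caller wanting four individually
 * freeable pages from one physically contiguous run might do:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		split_page(page, 2);
 *		__free_page(page + 3);	(release just the last sub-page)
 *	}
 *
 * After split_page() each of the 1 << order sub-pages carries its own
 * reference count and must be freed separately.
 */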

/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
	unsigned int order;
	unsigned long watermark;
	struct zone *zone;

	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	order = page_order(page);

	/* Obey watermarks as if the page was being allocated */
	watermark = low_wmark_pages(zone) + (1 << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return 0;

	/* Remove page from free list */
	list_del(&page->lru);
	zone->free_area[order].nr_free--;
	rmv_page_order(page);
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));

	/* Split into individual pages */
	set_page_refcounted(page);
	split_page(page, order);

	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages)
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
	}

	return 1 << order;
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
 * we cheat by calling it from here, in the order > 0 path. Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, int order, gfp_t gfp_flags,
			int migratetype)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);

again:
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;
		struct list_head *list;

		local_irq_save(flags);
		pcp = &this_cpu_ptr(zone->pageset)->pcp;
		list = &pcp->lists[migratetype];
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, cold);
			if (unlikely(list_empty(list)))
				goto failed;
		}

		if (cold)
			page = list_entry(list->prev, struct page, lru);
		else
			page = list_entry(list->next, struct page, lru);

		list_del(&page->lru);
		pcp->count--;
	} else {
		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
			/*
			 * __GFP_NOFAIL is not to be used in new code.
			 *
			 * All __GFP_NOFAIL callers should be fixed so that they
			 * properly detect and handle allocation failures.
			 *
			 * We most definitely don't want callers attempting to
			 * allocate greater than order-1 page units with
			 * __GFP_NOFAIL.
			 */
			WARN_ON_ONCE(order > 1);
		}
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone, gfp_flags);
	local_irq_restore(flags);

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	return NULL;
}

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;
} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return 0;
	if (gfp_mask & __GFP_NOFAIL)
		return 0;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return 0;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return 0;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}
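
/*
 * Example (editor's sketch): with CONFIG_FAIL_PAGE_ALLOC enabled,
 * failures can be injected via the generic fault_attr boot format,
 * believed to be
 *
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 *
 * while min_order above additionally restricts injection to
 * allocations of at least that order.
 */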

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
					&fail_page_alloc.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
				&fail_page_alloc.ignore_gfp_wait))
		goto fail;
	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				&fail_page_alloc.ignore_gfp_highmem))
		goto fail;
	if (!debugfs_create_u32("min-order", mode, dir,
				&fail_page_alloc.min_order))
		goto fail;

	return 0;
fail:
	debugfs_remove_recursive(dir);

	return -ENOMEM;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return true if free pages are above 'mark'. This takes into account the
 * order of the allocation.
 */
static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags, long free_pages)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	int o;

	free_pages -= (1 << order) - 1;
	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return false;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return false;
	}
	return true;
}

bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
					zone_page_state(z, NR_FREE_PAGES));
}

bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
								free_pages);
}
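
/*
 * Worked example (editor's illustration with made-up numbers): take
 * mark = 128, order = 2, no ALLOC_HIGH/ALLOC_HARDER, no lowmem
 * reserve, free_pages = 200, free_area[0].nr_free = 40 and
 * free_area[1].nr_free = 20.
 *
 * __zone_watermark_ok() first checks 200 - ((1 << 2) - 1) = 197 > 128.
 * At o = 0 it discounts the 40 order-0 pages (197 - 40 = 157) against
 * min = 64; at o = 1 it discounts 20 << 1 = 40 more (157 - 40 = 117)
 * against min = 32. Every check passes, so the order-2 allocation is
 * allowed to proceed.
 */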

#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full. See further
 * comments in mmzone.h. Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	nodemask_t *allowednodes;	/* zonelist_cache approximation */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return NULL;

	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}

	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
					&cpuset_current_mems_allowed :
					&node_states[N_HIGH_MEMORY];
	return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 * 1) Check that the zone isn't thought to be full (doesn't have its
 *    bit set in the zonelist_cache fullzones BITMAP).
 * 2) Check that the zone's node (obtained from the zonelist_cache
 *    z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even though it is slower to
 * do so. We are low on memory in the second scan, and should leave
 * no stone unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->_zonerefs;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	i = z - zonelist->_zonerefs;

	set_bit(i, zlc->fullzones);
}

/*
 * Clear all zones' full bits; called after direct reclaim makes progress
 * so that a zone that was recently full is not skipped over for up to a
 * second.
 */
static void zlc_clear_zones_full(struct zonelist *zonelist)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
}

#else	/* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
				nodemask_t *allowednodes)
{
	return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}

static void zlc_clear_zones_full(struct zonelist *zonelist)
{
}

#endif	/* CONFIG_NUMA */

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
		struct zone *preferred_zone, int migratetype)
{
	struct zoneref *z;
	struct page *page = NULL;
	int classzone_idx;
	struct zone *zone;
	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */

	classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						high_zoneidx, nodemask) {
		if (NUMA_BUILD && zlc_active &&
			!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;
		if ((alloc_flags & ALLOC_CPUSET) &&
			!cpuset_zone_allowed_softwall(zone, gfp_mask))
				continue;

		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			int ret;

			mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
			if (zone_watermark_ok(zone, order, mark,
				    classzone_idx, alloc_flags))
				goto try_this_zone;

			if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
				/*
				 * We do zlc_setup if there are multiple nodes
				 * and before considering the first zone allowed
				 * by the cpuset.
				 */
				allowednodes = zlc_setup(zonelist, alloc_flags);
				zlc_active = 1;
				did_zlc_setup = 1;
			}

			if (zone_reclaim_mode == 0)
				goto this_zone_full;

			/*
			 * As we may have just activated ZLC, check if the first
			 * eligible zone has failed zone_reclaim recently.
			 */
			if (NUMA_BUILD && zlc_active &&
				!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;

			ret = zone_reclaim(zone, gfp_mask, order);
			switch (ret) {
			case ZONE_RECLAIM_NOSCAN:
				/* did not scan */
				continue;
			case ZONE_RECLAIM_FULL:
				/* scanned but unreclaimable */
				continue;
			default:
				/* did we reclaim enough? */
				if (!zone_watermark_ok(zone, order, mark,
						classzone_idx, alloc_flags))
					goto this_zone_full;
			}
		}

try_this_zone:
		page = buffered_rmqueue(preferred_zone, zone, order,
						gfp_mask, migratetype);
		if (page)
			break;
this_zone_full:
		if (NUMA_BUILD)
			zlc_mark_zone_full(zonelist, z);
	}

	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
		/* Disable zlc cache for second zonelist scan */
		zlc_active = 0;
		goto zonelist_scan;
	}
	return page;
}

/*
 * Large machines with many possible nodes should not always dump per-node
 * meminfo in irq context.
 */
static inline bool should_suppress_show_mem(void)
{
	bool ret = false;

#if NODES_SHIFT > 8
	ret = in_interrupt();
#endif
	return ret;
}

static DEFINE_RATELIMIT_STATE(nopage_rs,
		DEFAULT_RATELIMIT_INTERVAL,
		DEFAULT_RATELIMIT_BURST);

void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
{
	unsigned int filter = SHOW_MEM_FILTER_NODES;

	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
		return;

	/*
	 * This documents exceptions given to allocations in certain
	 * contexts that are allowed to allocate outside current's set
	 * of allowed nodes.
	 */
	if (!(gfp_mask & __GFP_NOMEMALLOC))
		if (test_thread_flag(TIF_MEMDIE) ||
		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
			filter &= ~SHOW_MEM_FILTER_NODES;
	if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
		filter &= ~SHOW_MEM_FILTER_NODES;

	if (fmt) {
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		pr_warn("%pV", &vaf);

		va_end(args);
	}

	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
		current->comm, order, gfp_mask);

	dump_stack();
	if (!should_suppress_show_mem())
		show_mem(filter);
}

static inline int
should_alloc_retry(gfp_t gfp_mask, unsigned int order,
				unsigned long pages_reclaimed)
{
	/* Do not loop if specifically requested */
	if (gfp_mask & __GFP_NORETRY)
		return 0;

	/*
	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
	 * means __GFP_NOFAIL, but that may not be true in other
	 * implementations.
	 */
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return 1;

	/*
	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
	 * specified, then we retry until we no longer reclaim any pages
	 * (above), or we've reclaimed an order of pages at least as
	 * large as the allocation's order. In both cases, if the
	 * allocation still fails, we stop retrying.
	 */
	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
		return 1;

	/*
	 * Don't let big-order allocations loop unless the caller
	 * explicitly requests that.
	 */
	if (gfp_mask & __GFP_NOFAIL)
		return 1;

	return 0;
}

static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	struct page *page;

	/* Acquire the OOM killer lock for the zones in zonelist */
	if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
		schedule_timeout_uninterruptible(1);
		return NULL;
	}

	/*
	 * Go through the zonelist yet one more time, keep very high watermark
	 * here, this is only to catch a parallel oom killing, we must fail if
	 * we're still under heavy pressure.
	 */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
		order, zonelist, high_zoneidx,
		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
		preferred_zone, migratetype);
	if (page)
		goto out;

	if (!(gfp_mask & __GFP_NOFAIL)) {
		/* The OOM killer will not help higher order allocs */
		if (order > PAGE_ALLOC_COSTLY_ORDER)
			goto out;
		/* The OOM killer does not needlessly kill tasks for lowmem */
		if (high_zoneidx < ZONE_NORMAL)
			goto out;
		/*
		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
		 * The caller should handle page allocation failure by itself if
		 * it specifies __GFP_THISNODE.
		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
		 */
		if (gfp_mask & __GFP_THISNODE)
			goto out;
	}
	/* Exhausted what can be done so it's blamo time */
	out_of_memory(zonelist, gfp_mask, order, nodemask);

out:
	clear_zonelist_oom(zonelist, gfp_mask);
	return page;
}

#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int migratetype, unsigned long *did_some_progress,
	bool sync_migration)
{
	struct page *page;

	if (!order || compaction_deferred(preferred_zone))
		return NULL;

	current->flags |= PF_MEMALLOC;
	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
						nodemask, sync_migration);
	current->flags &= ~PF_MEMALLOC;
	if (*did_some_progress != COMPACT_SKIPPED) {

		/* Page migration frees to the PCP lists but we want merging */
		drain_pages(get_cpu());
		put_cpu();

		page = get_page_from_freelist(gfp_mask, nodemask,
				order, zonelist, high_zoneidx,
				alloc_flags, preferred_zone,
				migratetype);
		if (page) {
			preferred_zone->compact_considered = 0;
			preferred_zone->compact_defer_shift = 0;
			count_vm_event(COMPACTSUCCESS);
			return page;
		}

		/*
		 * It's bad if compaction run occurs and fails.
		 * The most likely reason is that pages exist,
		 * but not enough to satisfy watermarks.
		 */
		count_vm_event(COMPACTFAIL);
		defer_compaction(preferred_zone);

		cond_resched();
	}

	return NULL;
}
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int migratetype, unsigned long *did_some_progress,
	bool sync_migration)
{
	return NULL;
}
#endif /* CONFIG_COMPACTION */

/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int migratetype, unsigned long *did_some_progress)
{
	struct page *page = NULL;
	struct reclaim_state reclaim_state;
	bool drained = false;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	current->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	current->reclaim_state = &reclaim_state;

	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);

	current->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	current->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (unlikely(!(*did_some_progress)))
		return NULL;

	/* After successful reclaim, reconsider all zones for allocation */
	if (NUMA_BUILD)
		zlc_clear_zones_full(zonelist);

retry:
	page = get_page_from_freelist(gfp_mask, nodemask, order,
					zonelist, high_zoneidx,
					alloc_flags, preferred_zone,
					migratetype);

	/*
	 * If an allocation failed after direct reclaim, it could be because
	 * pages are pinned on the per-cpu lists. Drain them and try again.
	 */
	if (!page && !drained) {
		drain_all_pages();
		drained = true;
		goto retry;
	}

	return page;
}

/*
 * This is called in the allocator slow-path if the allocation request is of
 * sufficient urgency to ignore watermarks and take other desperate measures.
 */
static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	struct page *page;

	do {
		page = get_page_from_freelist(gfp_mask, nodemask, order,
			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
			preferred_zone, migratetype);

		if (!page && gfp_mask & __GFP_NOFAIL)
			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
	} while (!page && (gfp_mask & __GFP_NOFAIL));

	return page;
}

static inline
void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
						enum zone_type high_zoneidx,
						enum zone_type classzone_idx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		wakeup_kswapd(zone, order, classzone_idx);
}

static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
	const gfp_t wait = gfp_mask & __GFP_WAIT;

	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);

	if (!wait) {
		/*
		 * Not worth trying to allocate harder for
		 * __GFP_NOMEMALLOC even if it can't schedule.
		 */
		if (!(gfp_mask & __GFP_NOMEMALLOC))
			alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(current)) && !in_interrupt())
		alloc_flags |= ALLOC_HARDER;

	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
		if (!in_interrupt() &&
		    ((current->flags & PF_MEMALLOC) ||
		     unlikely(test_thread_flag(TIF_MEMDIE))))
			alloc_flags |= ALLOC_NO_WATERMARKS;
	}

	return alloc_flags;
}
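
/*
 * Examples (editor's illustration): for GFP_KERNEL (__GFP_WAIT set)
 * the function above returns ALLOC_WMARK_MIN | ALLOC_CPUSET. For
 * GFP_ATOMIC (__GFP_HIGH set, __GFP_WAIT clear) it returns
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER with ALLOC_CPUSET
 * cleared, letting the request dig deeper into reserves and ignore
 * cpuset restrictions rather than fail.
 */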

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int migratetype)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct page *page = NULL;
	int alloc_flags;
	unsigned long pages_reclaimed = 0;
	unsigned long did_some_progress;
	bool sync_migration = false;

	/*
	 * In the slowpath, we sanity check order to avoid ever trying to
	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
	 * be using allocators in order of preference for an area that is
	 * too large.
	 */
	if (order >= MAX_ORDER) {
		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
	 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
	 * using a larger set of nodes after it has established that the
	 * allowed per node queues are empty and that nodes are
	 * over allocated.
	 */
	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
		goto nopage;

restart:
	if (!(gfp_mask & __GFP_NO_KSWAPD))
		wake_all_kswapd(order, zonelist, high_zoneidx,
						zone_idx(preferred_zone));

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 */
	alloc_flags = gfp_to_alloc_flags(gfp_mask);

	/*
	 * Find the true preferred zone if the allocation is unconstrained by
	 * cpusets.
	 */
	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
		first_zones_zonelist(zonelist, high_zoneidx, NULL,
					&preferred_zone);

rebalance:
	/* This is the last chance, in general, before the goto nopage. */
	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
			preferred_zone, migratetype);
	if (page)
		goto got_pg;

	/* Allocate without watermarks if the context allows */
	if (alloc_flags & ALLOC_NO_WATERMARKS) {
		page = __alloc_pages_high_priority(gfp_mask, order,
				zonelist, high_zoneidx, nodemask,
				preferred_zone, migratetype);
		if (page)
			goto got_pg;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

	/* Avoid recursion of direct reclaim */
	if (current->flags & PF_MEMALLOC)
		goto nopage;

	/* Avoid allocations with no watermarks from looping endlessly */
	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
		goto nopage;

	/*
	 * Try direct compaction. The first pass is asynchronous. Subsequent
	 * attempts after direct reclaim are synchronous.
	 */
	page = __alloc_pages_direct_compact(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask,
					alloc_flags, preferred_zone,
					migratetype, &did_some_progress,
					sync_migration);
	if (page)
		goto got_pg;
	sync_migration = true;

	/* Try direct reclaim and then allocating */
	page = __alloc_pages_direct_reclaim(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask,
					alloc_flags, preferred_zone,
					migratetype, &did_some_progress);
	if (page)
		goto got_pg;

	/*
	 * If we failed to make any progress reclaiming, then we are
	 * running out of options and have to consider going OOM.
	 */
	if (!did_some_progress) {
		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
			if (oom_killer_disabled)
				goto nopage;
			page = __alloc_pages_may_oom(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask, preferred_zone,
					migratetype);
			if (page)
				goto got_pg;

			if (!(gfp_mask & __GFP_NOFAIL)) {
				/*
				 * The oom killer is not called for high-order
				 * allocations that may fail, so if no progress
				 * is being made, there are no other options and
				 * retrying is unlikely to help.
				 */
				if (order > PAGE_ALLOC_COSTLY_ORDER)
					goto nopage;
				/*
				 * The oom killer is not called for lowmem
				 * allocations to prevent needlessly killing
				 * innocent tasks.
				 */
				if (high_zoneidx < ZONE_NORMAL)
					goto nopage;
			}

			goto restart;
		}
	}

	/* Check if we should retry the allocation */
	pages_reclaimed += did_some_progress;
	if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
		/* Wait for some write requests to complete then retry */
		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
		goto rebalance;
	} else {
		/*
		 * High-order allocations do not necessarily loop after
		 * direct reclaim and reclaim/compaction depends on compaction
		 * being called after reclaim so call directly if necessary.
		 */
		page = __alloc_pages_direct_compact(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask,
					alloc_flags, preferred_zone,
					migratetype, &did_some_progress,
					sync_migration);
		if (page)
			goto got_pg;
	}

nopage:
	warn_alloc_failed(gfp_mask, order, NULL);
	return page;
got_pg:
	if (kmemcheck_enabled)
		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
			struct zonelist *zonelist, nodemask_t *nodemask)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	struct zone *preferred_zone;
	struct page *page;
	int migratetype = allocflags_to_migratetype(gfp_mask);

	gfp_mask &= gfp_allowed_mask;

	lockdep_trace_alloc(gfp_mask);

	might_sleep_if(gfp_mask & __GFP_WAIT);

	if (should_fail_alloc_page(gfp_mask, order))
		return NULL;

	/*
	 * Check the zones suitable for the gfp_mask contain at least one
	 * valid zone. It's possible to have an empty zonelist as a result
	 * of GFP_THISNODE and a memoryless node.
	 */
	if (unlikely(!zonelist->_zonerefs->zone))
		return NULL;

	get_mems_allowed();
	/* The preferred zone is used for statistics later */
	first_zones_zonelist(zonelist, high_zoneidx,
				nodemask ? : &cpuset_current_mems_allowed,
				&preferred_zone);
	if (!preferred_zone) {
		put_mems_allowed();
		return NULL;
	}

	/* First allocation attempt */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
			preferred_zone, migratetype);
	if (unlikely(!page))
		page = __alloc_pages_slowpath(gfp_mask, order,
				zonelist, high_zoneidx, nodemask,
				preferred_zone, migratetype);
	put_mems_allowed();

	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
	return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);

/*
 * Common helper functions.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	/*
	 * __get_free_pages() returns a 32-bit address, which cannot represent
	 * a highmem page.
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);

void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_cold_page(page, 0);
		else
			__free_pages_ok(page, order);
	}
}
EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}
EXPORT_SYMBOL(free_pages);

static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
{
	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}
	return (void *)addr;
}
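
/*
 * Worked example (editor's illustration): for size = 3 * PAGE_SIZE
 * the callers below pass order = 2, so alloc_end spans four pages
 * while used ends after three. make_alloc_exact() splits the order-2
 * block into order-0 pages and frees the one trailing page, leaving
 * exactly the three requested pages allocated.
 */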

/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request. alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);
	return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 *			   pages on a node.
 * @nid: the preferred node ID where memory should be allocated
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * Like alloc_pages_exact(), but try to allocate on node nid first before
 * falling back.
 * Note this is not alloc_pages_exact_node() which allocates on a specific
 * node, but is not exact.
 */
void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
{
	unsigned order = get_order(size);
	struct page *p = alloc_pages_node(nid, gfp_mask, order);
	if (!p)
		return NULL;
	return make_alloc_exact((unsigned long)page_address(p), order, size);
}
EXPORT_SYMBOL(alloc_pages_exact_nid);

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long addr = (unsigned long)virt;
	unsigned long end = addr + PAGE_ALIGN(size);

	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(free_pages_exact);
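
/*
 * Usage sketch (editor's illustration, assuming a sleepable context):
 *
 *	void *buf = alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 3 * PAGE_SIZE);
 *
 * A plain alloc_pages(GFP_KERNEL, get_order(3 * PAGE_SIZE)) would tie
 * up four pages for the same request.
 */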

static unsigned int nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;

	/* Just pick one node, since fallback list is circular */
	unsigned int sum = 0;

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

	for_each_zone_zonelist(zone, z, zonelist, offset) {
		unsigned long size = zone->present_pages;
		unsigned long high = high_wmark_pages(zone);
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);

/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
}

static inline void show_node(struct zone *zone)
{
	if (NUMA_BUILD)
		printk("Node %d ", zone_to_nid(zone));
}

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = global_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}
EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	val->totalram = pgdat->node_present_pages;
	val->freeram = node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
			NR_FREE_PAGES);
#else
	val->totalhigh = 0;
	val->freehigh = 0;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif

/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
bool skip_free_areas_node(unsigned int flags, int nid)
{
	bool ret = false;

	if (!(flags & SHOW_MEM_FILTER_NODES))
		goto out;

	get_mems_allowed();
	ret = !node_isset(nid, cpuset_current_mems_allowed);
	put_mems_allowed();
out:
	return ret;
}

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * Show free area list (used inside shift_scroll-lock stuff).
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 * Suppresses nodes that are not allowed by current's cpuset if
 * SHOW_MEM_FILTER_NODES is passed.
 */
void show_free_areas(unsigned int filter)
{
	int cpu;
	struct zone *zone;

	for_each_populated_zone(zone) {
		if (skip_free_areas_node(filter, zone_to_nid(zone)))
			continue;
		show_node(zone);
		printk("%s per-cpu:\n", zone->name);

		for_each_online_cpu(cpu) {
			struct per_cpu_pageset *pageset;

			pageset = per_cpu_ptr(zone->pageset, cpu);

			printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
			       cpu, pageset->pcp.high,
			       pageset->pcp.batch, pageset->pcp.count);
		}
	}

	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu"
		" dirty:%lu writeback:%lu unstable:%lu\n"
		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
		global_page_state(NR_ACTIVE_ANON),
		global_page_state(NR_INACTIVE_ANON),
		global_page_state(NR_ISOLATED_ANON),
		global_page_state(NR_ACTIVE_FILE),
		global_page_state(NR_INACTIVE_FILE),
		global_page_state(NR_ISOLATED_FILE),
		global_page_state(NR_UNEVICTABLE),
		global_page_state(NR_FILE_DIRTY),
		global_page_state(NR_WRITEBACK),
		global_page_state(NR_UNSTABLE_NFS),
		global_page_state(NR_FREE_PAGES),
		global_page_state(NR_SLAB_RECLAIMABLE),
		global_page_state(NR_SLAB_UNRECLAIMABLE),
		global_page_state(NR_FILE_MAPPED),
		global_page_state(NR_SHMEM),
		global_page_state(NR_PAGETABLE),
		global_page_state(NR_BOUNCE));

	for_each_populated_zone(zone) {
		int i;

		if (skip_free_areas_node(filter, zone_to_nid(zone)))
			continue;
		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" present:%lukB"
			" mlocked:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" mapped:%lukB"
			" shmem:%lukB"
			" slab_reclaimable:%lukB"
			" slab_unreclaimable:%lukB"
			" kernel_stack:%lukB"
			" pagetables:%lukB"
			" unstable:%lukB"
			" bounce:%lukB"
			" writeback_tmp:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone_page_state(zone, NR_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ISOLATED_ANON)),
			K(zone_page_state(zone, NR_ISOLATED_FILE)),
			K(zone->present_pages),
			K(zone_page_state(zone, NR_MLOCK)),
			K(zone_page_state(zone, NR_FILE_DIRTY)),
			K(zone_page_state(zone, NR_WRITEBACK)),
			K(zone_page_state(zone, NR_FILE_MAPPED)),
			K(zone_page_state(zone, NR_SHMEM)),
			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
			zone_page_state(zone, NR_KERNEL_STACK) *
				THREAD_SIZE / 1024,
			K(zone_page_state(zone, NR_PAGETABLE)),
			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
			K(zone_page_state(zone, NR_BOUNCE)),
			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
			zone->pages_scanned,
			(zone->all_unreclaimable ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_populated_zone(zone) {
		unsigned long nr[MAX_ORDER], flags, order, total = 0;

		if (skip_free_areas_node(filter, zone_to_nid(zone)))
			continue;
		show_node(zone);
		printk("%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			nr[order] = zone->free_area[order].nr_free;
			total += nr[order] << order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++)
			printk("%lu*%lukB ", nr[order], K(1UL) << order);
		printk("= %lukB\n", K(total));
	}

	printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}

static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
	zoneref->zone = zone;
	zoneref->zone_idx = zone_idx(zone);
}

/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
				int nr_zones, enum zone_type zone_type)
{
	struct zone *zone;

	BUG_ON(zone_type >= MAX_NR_ZONES);
	zone_type++;

	do {
		zone_type--;
		zone = pgdat->node_zones + zone_type;
		if (populated_zone(zone)) {
			zoneref_set_zone(zone,
				&zonelist->_zonerefs[nr_zones++]);
			check_highest_zone(zone_type);
		}

	} while (zone_type);
	return nr_zones;
}

/*
 * zonelist_order:
 * 0 = automatic detection of better ordering.
 * 1 = order by ([node] distance, -zonetype)
 * 2 = order by (-zonetype, [node] distance)
 *
 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
 * the same zonelist. So only NUMA can configure this param.
 */
#define ZONELIST_ORDER_DEFAULT	0
#define ZONELIST_ORDER_NODE	1
#define ZONELIST_ORDER_ZONE	2

/* zonelist order in the kernel.
 * set_zonelist_order() will set this to NODE or ZONE.
 */
static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};

#ifdef CONFIG_NUMA
/* The value the user specified, possibly changed by config */
static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
/* string for sysctl */
#define NUMA_ZONELIST_ORDER_LEN	16
char numa_zonelist_order[16] = "default";

/*
 * Interface for configuring zonelist ordering.
 * Command line option "numa_zonelist_order"
 *	= "[dD]efault	- default, automatic configuration.
 *	= "[nN]ode	- order by node locality, then by zone within node
 *	= "[zZ]one	- order by zone, then by locality within zone
 */
static int __parse_numa_zonelist_order(char *s)
{
	if (*s == 'd' || *s == 'D') {
		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
	} else if (*s == 'n' || *s == 'N') {
		user_zonelist_order = ZONELIST_ORDER_NODE;
	} else if (*s == 'z' || *s == 'Z') {
		user_zonelist_order = ZONELIST_ORDER_ZONE;
	} else {
		printk(KERN_WARNING
			"Ignoring invalid numa_zonelist_order value: "
			"%s\n", s);
		return -EINVAL;
	}
	return 0;
}

static __init int setup_numa_zonelist_order(char *s)
{
	int ret;

	if (!s)
		return 0;

	ret = __parse_numa_zonelist_order(s);
	if (ret == 0)
		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);

	return ret;
}
early_param("numa_zonelist_order", setup_numa_zonelist_order);

/*
 * sysctl handler for numa_zonelist_order
 */
int numa_zonelist_order_handler(ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos)
{
	char saved_string[NUMA_ZONELIST_ORDER_LEN];
	int ret;
	static DEFINE_MUTEX(zl_order_mutex);

	mutex_lock(&zl_order_mutex);
	if (write)
		strcpy(saved_string, (char *)table->data);
	ret = proc_dostring(table, write, buffer, length, ppos);
	if (ret)
		goto out;
	if (write) {
		int oldval = user_zonelist_order;
		if (__parse_numa_zonelist_order((char *)table->data)) {
			/*
			 * Bogus value. Restore the saved string.
			 */
			strncpy((char *)table->data, saved_string,
				NUMA_ZONELIST_ORDER_LEN);
			user_zonelist_order = oldval;
		} else if (oldval != user_zonelist_order) {
			mutex_lock(&zonelists_mutex);
			build_all_zonelists(NULL);
			mutex_unlock(&zonelists_mutex);
		}
	}
out:
	mutex_unlock(&zl_order_mutex);
	return ret;
}

#define MAX_NODE_LOAD (nr_online_nodes)
static int node_load[MAX_NUMNODES];

/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list. The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 * It returns -1 if no node is found.
 */
static int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;
	const struct cpumask *tmp = cpumask_of_node(0);

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
		node_set(node, *used_node_mask);
		return node;
	}

	for_each_node_state(n, N_HIGH_MEMORY) {

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Penalize nodes under us ("prefer the next node") */
		val += (n < node);

		/* Give preference to headless and unused nodes */
		tmp = cpumask_of_node(n);
		if (!cpumask_empty(tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}
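
/*
 * Worked example (editor's illustration, assuming the usual
 * PENALTY_FOR_NODE_WITH_CPUS value of 1): building node 0's fallback
 * list with node 1 at distance 20 (has CPUs) and node 2 at distance
 * 30 (headless), node 1 scores (20 + 1) * MAX_NODE_LOAD * MAX_NUMNODES
 * + node_load[1] while node 2 scores 30 * MAX_NODE_LOAD * MAX_NUMNODES
 * + node_load[2]. Distance dominates the score, so node 1 is chosen
 * first; node_load only breaks ties between otherwise equal candidates.
 */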

/*
 * Build zonelists ordered by node and zones within node.
 * This results in maximum locality--normal zone overflows into local
 * DMA zone, if any--but risks exhausting DMA zone.
 */
static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
{
	int j;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[0];
	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
		;
	j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/*
 * Build gfp_thisnode zonelists
 */
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
	int j;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[1];
	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}
  2495. /*
  2496. * Build zonelists ordered by zone and nodes within zones.
  2497. * This results in conserving DMA zone[s] until all Normal memory is
  2498. * exhausted, but results in overflowing to remote node while memory
  2499. * may still exist in local DMA zone.
  2500. */
  2501. static int node_order[MAX_NUMNODES];
  2502. static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
  2503. {
  2504. int pos, j, node;
  2505. int zone_type; /* needs to be signed */
  2506. struct zone *z;
  2507. struct zonelist *zonelist;
  2508. zonelist = &pgdat->node_zonelists[0];
  2509. pos = 0;
  2510. for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
  2511. for (j = 0; j < nr_nodes; j++) {
  2512. node = node_order[j];
  2513. z = &NODE_DATA(node)->node_zones[zone_type];
  2514. if (populated_zone(z)) {
  2515. zoneref_set_zone(z,
  2516. &zonelist->_zonerefs[pos++]);
  2517. check_highest_zone(zone_type);
  2518. }
  2519. }
  2520. }
  2521. zonelist->_zonerefs[pos].zone = NULL;
  2522. zonelist->_zonerefs[pos].zone_idx = 0;
  2523. }
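/*
 * Worked example (two nodes, each with Normal and DMA zones populated):
 * zone order yields
 *	node0/Normal, node1/Normal, node0/DMA, node1/DMA
 * while node order (build_zonelists_in_node_order) would yield
 *	node0/Normal, node0/DMA, node1/Normal, node1/DMA
 */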
static int default_zonelist_order(void)
{
	int nid, zone_type;
	unsigned long low_kmem_size, total_size;
	struct zone *z;
	int average_size;
	/*
	 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
	 * If they are really small and used heavily, the system can fall
	 * into OOM very easily.
	 * This function detects ZONE_DMA/DMA32 size and configures zone order.
	 */
	/* Is there ZONE_NORMAL? (e.g. ppc has only the DMA zone.) */
	low_kmem_size = 0;
	total_size = 0;
	for_each_online_node(nid) {
		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
			z = &NODE_DATA(nid)->node_zones[zone_type];
			if (populated_zone(z)) {
				if (zone_type < ZONE_NORMAL)
					low_kmem_size += z->present_pages;
				total_size += z->present_pages;
			} else if (zone_type == ZONE_NORMAL) {
				/*
				 * If any node has only lowmem, then node order
				 * is preferred to allow kernel allocations
				 * locally; otherwise, they can easily infringe
				 * on other nodes when there is an abundance of
				 * lowmem available to allocate from.
				 */
				return ZONELIST_ORDER_NODE;
			}
		}
	}
	if (!low_kmem_size ||		/* there is no DMA area. */
	    low_kmem_size > total_size/2)	/* DMA/DMA32 is big. */
		return ZONELIST_ORDER_NODE;
	/*
	 * Look into each node's config.
	 * If there is a node whose DMA/DMA32 memory is a very large share
	 * of its local memory, NODE_ORDER may be suitable.
	 */
	average_size = total_size /
				(nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
	for_each_online_node(nid) {
		low_kmem_size = 0;
		total_size = 0;
		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
			z = &NODE_DATA(nid)->node_zones[zone_type];
			if (populated_zone(z)) {
				if (zone_type < ZONE_NORMAL)
					low_kmem_size += z->present_pages;
				total_size += z->present_pages;
			}
		}
		if (low_kmem_size &&
		    total_size > average_size && /* ignore small node */
		    low_kmem_size > total_size * 70/100)
			return ZONELIST_ORDER_NODE;
	}
	return ZONELIST_ORDER_ZONE;
}
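/*
 * Worked example of the 70% heuristic above (hypothetical numbers): a
 * non-small node with 640MB total of which 512MB is DMA/DMA32 has
 * low_kmem_size/total_size = 80% > 70%, so node order is chosen to keep
 * kernel allocations local rather than letting them drain the small
 * lowmem of other nodes.
 */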
static void set_zonelist_order(void)
{
	if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
		current_zonelist_order = default_zonelist_order();
	else
		current_zonelist_order = user_zonelist_order;
}

static void build_zonelists(pg_data_t *pgdat)
{
	int j, node, load;
	enum zone_type i;
	nodemask_t used_mask;
	int local_node, prev_node;
	struct zonelist *zonelist;
	int order = current_zonelist_order;

	/* initialize zonelists */
	for (i = 0; i < MAX_ZONELISTS; i++) {
		zonelist = pgdat->node_zonelists + i;
		zonelist->_zonerefs[0].zone = NULL;
		zonelist->_zonerefs[0].zone_idx = 0;
	}

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = nr_online_nodes;
	prev_node = local_node;
	nodes_clear(used_mask);

	memset(node_order, 0, sizeof(node_order));
	j = 0;

	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		int distance = node_distance(local_node, node);

		/*
		 * If another node is sufficiently far away then it is better
		 * to reclaim pages in a zone before going off node.
		 */
		if (distance > RECLAIM_DISTANCE)
			zone_reclaim_mode = 1;

		/*
		 * We don't want to pressure a particular node, so add a
		 * penalty to the first node in the same distance group
		 * to make it round-robin.
		 */
		if (distance != node_distance(local_node, prev_node))
			node_load[node] = load;

		prev_node = node;
		load--;
		if (order == ZONELIST_ORDER_NODE)
			build_zonelists_in_node_order(pgdat, node);
		else
			node_order[j++] = node;	/* remember order */
	}

	if (order == ZONELIST_ORDER_ZONE) {
		/* calculate node order -- i.e., DMA last! */
		build_zonelists_in_zone_order(pgdat, j);
	}

	build_thisnode_zonelists(pgdat);
}
/* Construct the zonelist performance cache - see further mmzone.h */
static void build_zonelist_cache(pg_data_t *pgdat)
{
	struct zonelist *zonelist;
	struct zonelist_cache *zlc;
	struct zoneref *z;

	zonelist = &pgdat->node_zonelists[0];
	zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
	for (z = zonelist->_zonerefs; z->zone; z++)
		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * Return node id of node used for "local" allocations.
 * I.e., first node id of first zone in arg node's generic zonelist.
 * Used for initializing percpu 'numa_mem', which is used primarily
 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
 */
int local_memory_node(int node)
{
	struct zone *zone;

	(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
				   gfp_zone(GFP_KERNEL),
				   NULL,
				   &zone);
	return zone->node;
}
#endif

#else	/* CONFIG_NUMA */

static void set_zonelist_order(void)
{
	current_zonelist_order = ZONELIST_ORDER_ZONE;
}
static void build_zonelists(pg_data_t *pgdat)
{
	int node, local_node;
	enum zone_type j;
	struct zonelist *zonelist;

	local_node = pgdat->node_id;

	zonelist = &pgdat->node_zonelists[0];
	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);

	/*
	 * Now we build the zonelist so that it contains the zones
	 * of all the other nodes.
	 * We don't want to pressure a particular node, so when
	 * building the zones for node N, we make sure that the
	 * zones coming right after the local ones are those from
	 * node N+1 (wrapping around modulo the number of nodes).
	 */
	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
		if (!node_online(node))
			continue;
		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	}
	for (node = 0; node < local_node; node++) {
		if (!node_online(node))
			continue;
		j = build_zonelists_node(NODE_DATA(node), zonelist, j,
							MAX_NR_ZONES - 1);
	}

	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}
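/*
 * Example: with three online nodes, node 1's zonelist holds node 1's
 * zones first, then node 2's, then node 0's, so overflow pressure is
 * rotated across nodes instead of always landing on node 0.
 */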
/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
static void build_zonelist_cache(pg_data_t *pgdat)
{
	pgdat->node_zonelists[0].zlcache_ptr = NULL;
}

#endif	/* CONFIG_NUMA */

/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
static void setup_zone_pageset(struct zone *zone);

/*
 * Global mutex to protect against size modification of zonelists
 * as well as to serialize pageset setup for the new populated zone.
 */
DEFINE_MUTEX(zonelists_mutex);
/* Return value is an int just so this can be passed to stop_machine() */
static __init_refok int __build_all_zonelists(void *data)
{
	int nid;
	int cpu;

#ifdef CONFIG_NUMA
	memset(node_load, 0, sizeof(node_load));
#endif
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);

		build_zonelists(pgdat);
		build_zonelist_cache(pgdat);
	}

	/*
	 * Initialize the boot_pagesets that are going to be used
	 * for bootstrapping processors. The real pagesets for
	 * each zone will be allocated later when the per cpu
	 * allocator is available.
	 *
	 * boot_pagesets are used also for bootstrapping offline
	 * cpus if the system is already booted because the pagesets
	 * are needed to initialize allocators on a specific cpu too.
	 * E.g. the percpu allocator needs the page allocator which
	 * needs the percpu allocator in order to allocate its pagesets
	 * (a chicken-egg dilemma).
	 */
	for_each_possible_cpu(cpu) {
		setup_pageset(&per_cpu(boot_pageset, cpu), 0);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
		/*
		 * We now know the "local memory node" for each node--
		 * i.e., the node of the first zone in the generic zonelist.
		 * Set up numa_mem percpu variable for on-line cpus. During
		 * boot, only the boot cpu should be on-line; we'll init the
		 * secondary cpus' numa_mem as they come on-line. During
		 * node/memory hotplug, we'll fixup all on-line cpus.
		 */
		if (cpu_online(cpu))
			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
#endif
	}

	return 0;
}
/*
 * Called with zonelists_mutex held always
 * unless system_state == SYSTEM_BOOTING.
 */
void __ref build_all_zonelists(void *data)
{
	set_zonelist_order();

	if (system_state == SYSTEM_BOOTING) {
		__build_all_zonelists(NULL);
		mminit_verify_zonelist();
		cpuset_init_current_mems_allowed();
	} else {
		/*
		 * We have to stop all cpus to guarantee there is no user
		 * of the zonelists.
		 */
#ifdef CONFIG_MEMORY_HOTPLUG
		if (data)
			setup_zone_pageset((struct zone *)data);
#endif
		stop_machine(__build_all_zonelists, NULL, NULL);
		/* cpuset refresh routine should be here */
	}
	vm_total_pages = nr_free_pagecache_pages();
	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * more accurate, but expensive to check per-zone. This check is
	 * made on memory-hotadd so a system can start with mobility
	 * disabled and enable it later.
	 */
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
	else
		page_group_by_mobility_disabled = 0;

	printk("Built %i zonelists in %s order, mobility grouping %s. "
		"Total pages: %ld\n",
			nr_online_nodes,
			zonelist_order_name[current_zonelist_order],
			page_group_by_mobility_disabled ? "off" : "on",
			vm_total_pages);
#ifdef CONFIG_NUMA
	printk("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}
/*
 * Helper functions to size the waitqueue hash table.
 * Essentially these want to choose hash table sizes sufficiently
 * large so that collisions trying to wait on pages are rare.
 * But in fact, the number of active page waitqueues on typical
 * systems is ridiculously low, less than 200, so this is even
 * conservative, even though it seems large.
 *
 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 * waitqueues, i.e. the size of the wait table given the number of pages.
 */
#define PAGES_PER_WAITQUEUE	256

#ifndef CONFIG_MEMORY_HOTPLUG
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;

	while (size < pages)
		size <<= 1;

	/*
	 * Once we have dozens or even hundreds of threads sleeping
	 * on IO we've got bigger problems than wait queue collision.
	 * Limit the size of the wait table to a reasonable size.
	 */
	size = min(size, 4096UL);

	return max(size, 4UL);
}
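/*
 * Worked example (assuming 4KiB pages): a 1GiB zone has 262144 pages;
 * 262144 / PAGES_PER_WAITQUEUE(256) = 1024, already a power of two, so
 * the table gets 1024 entries (clamped to the range [4, 4096]).
 */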
#else
/*
 * A zone's size might be changed by hot-add, so it is not possible to
 * determine a suitable size for its wait_table. So we use the maximum
 * size now.
 *
 * The max wait table size = 4096 x sizeof(wait_queue_head_t), i.e.:
 *
 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
 *
 * The maximum entries are prepared when a zone's memory is (512K + 256)
 * pages or more by the traditional way (see above). It equals:
 *
 *    i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
 *    ia64(16K page size)                 : = ( 8G + 4M)byte.
 *    powerpc (64K page size)             : = (32G +16M)byte.
 */
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	return 4096UL;
}
#endif

/*
 * This is an integer logarithm so that shifts can be used later
 * to extract the more random high bits from the multiplicative
 * hash function before the remainder is taken.
 */
static inline unsigned long wait_table_bits(unsigned long size)
{
	return ffz(~size);
}
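/*
 * Example: for size = 4096 (2^12), ~size has exactly its low twelve bits
 * set, so ffz(~size) returns 12 -- i.e. log2 of the table size.
 */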
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/*
 * Check if a pageblock contains reserved pages
 */
static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
			return 1;
	}
	return 0;
}
/*
 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
 * of blocks reserved is based on min_wmark_pages(zone). The memory within
 * the reserve will tend to store contiguous free pages. Setting
 * min_free_kbytes higher will lead to a bigger reserve which will get
 * freed as contiguous blocks as reclaim kicks in.
 */
static void setup_zone_migrate_reserve(struct zone *zone)
{
	unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
	struct page *page;
	unsigned long block_migratetype;
	int reserve;

	/*
	 * Get the start pfn, end pfn and the number of blocks to reserve.
	 * We have to be careful to be aligned to pageblock_nr_pages to
	 * make sure that we always check pfn_valid for the first page in
	 * the block.
	 */
	start_pfn = zone->zone_start_pfn;
	end_pfn = start_pfn + zone->spanned_pages;
	start_pfn = roundup(start_pfn, pageblock_nr_pages);
	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
							pageblock_order;

	/*
	 * Reserve blocks are generally in place to help high-order atomic
	 * allocations that are short-lived. A min_free_kbytes value that
	 * would result in more than 2 reserve blocks for atomic allocations
	 * is assumed to be in place to help anti-fragmentation for the
	 * future allocation of hugepages at runtime.
	 */
	reserve = min(2, reserve);

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		/* Watch out for overlapping nodes */
		if (page_to_nid(page) != zone_to_nid(zone))
			continue;

		block_migratetype = get_pageblock_migratetype(page);

		/* Only test what is necessary when the reserves are not met */
		if (reserve > 0) {
			/*
			 * Blocks with reserved pages will never be freed;
			 * skip them.
			 */
			block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
			if (pageblock_is_reserved(pfn, block_end_pfn))
				continue;

			/* If this block is reserved, account for it */
			if (block_migratetype == MIGRATE_RESERVE) {
				reserve--;
				continue;
			}

			/* Suitable for reserving if this block is movable */
			if (block_migratetype == MIGRATE_MOVABLE) {
				set_pageblock_migratetype(page,
							MIGRATE_RESERVE);
				move_freepages_block(zone, page,
							MIGRATE_RESERVE);
				reserve--;
				continue;
			}
		}

		/*
		 * If the reserve is met and this is a previously reserved
		 * block, take it back.
		 */
		if (block_migratetype == MIGRATE_RESERVE) {
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
			move_freepages_block(zone, page, MIGRATE_MOVABLE);
		}
	}
}
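/*
 * Worked example (assuming 4KiB pages and 2MiB pageblocks, i.e.
 * pageblock_order = 9): min_wmark_pages(zone) = 1500 rounds up to 1536,
 * giving 1536 >> 9 = 3 blocks, which min(2, reserve) then caps at 2.
 */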
/*
 * Initially all pages are reserved - free ones are freed
 * up by free_all_bootmem() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 */
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, enum memmap_context context)
{
	struct page *page;
	unsigned long end_pfn = start_pfn + size;
	unsigned long pfn;
	struct zone *z;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

	z = &NODE_DATA(nid)->node_zones[zone];
	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/*
		 * There can be holes in boot-time mem_map[]s
		 * handed to this function. They do not
		 * exist on hotplugged memory.
		 */
		if (context == MEMMAP_EARLY) {
			if (!early_pfn_valid(pfn))
				continue;
			if (!early_pfn_in_nid(pfn, nid))
				continue;
		}
		page = pfn_to_page(pfn);
		set_page_links(page, zone, nid, pfn);
		mminit_verify_page_links(page, zone, nid, pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		/*
		 * Mark the block movable so that blocks are reserved for
		 * movable at startup. This will force kernel allocations
		 * to reserve their blocks rather than leaking throughout
		 * the address space during boot when many long-lived
		 * kernel allocations are made. Later some blocks near
		 * the start are marked MIGRATE_RESERVE by
		 * setup_zone_migrate_reserve().
		 *
		 * The bitmap is created for the zone's valid pfn range,
		 * but the memmap can be created for invalid pages (for
		 * alignment), so check here that we do not call
		 * set_pageblock_migratetype() against a pfn out of zone.
		 */
		if ((z->zone_start_pfn <= pfn)
		    && (pfn < z->zone_start_pfn + z->spanned_pages)
		    && !(pfn & (pageblock_nr_pages - 1)))
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);

		INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
		if (!is_highmem_idx(zone))
			set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
	}
}

static void __meminit zone_init_free_lists(struct zone *zone)
{
	int order, t;
	for_each_migratetype_order(order, t) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		zone->free_area[order].nr_free = 0;
	}
}

#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
#endif
static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
	int batch;

	/*
	 * The per-cpu-pages pools are set to around 1000th of the
	 * size of the zone. But no more than 1/2 of a meg.
	 *
	 * OK, so we don't know how big the cache is. So guess.
	 */
	batch = zone->present_pages / 1024;
	if (batch * PAGE_SIZE > 512 * 1024)
		batch = (512 * 1024) / PAGE_SIZE;
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
	batch = rounddown_pow_of_two(batch + batch/2) - 1;

	return batch;

#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions.
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks. As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
	return 0;
#endif
}
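/*
 * Worked example (assuming a 1GiB zone with 4KiB pages): present_pages =
 * 262144, so batch = 256; 256 pages is 1MiB > 512KiB, clip to 128; divide
 * by 4 to get 32; then rounddown_pow_of_two(32 + 16) - 1 = 31.
 */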
static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
	struct per_cpu_pages *pcp;
	int migratetype;

	memset(p, 0, sizeof(*p));

	pcp = &p->pcp;
	pcp->count = 0;
	pcp->high = 6 * batch;
	pcp->batch = max(1UL, 1 * batch);
	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
		INIT_LIST_HEAD(&pcp->lists[migratetype]);
}

/*
 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
 * to the value high for the pageset p.
 */
static void setup_pagelist_highmark(struct per_cpu_pageset *p,
				unsigned long high)
{
	struct per_cpu_pages *pcp;

	pcp = &p->pcp;
	pcp->high = high;
	pcp->batch = max(1UL, high/4);
	if ((high/4) > (PAGE_SHIFT * 8))
		pcp->batch = PAGE_SHIFT * 8;
}
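/*
 * Example (assuming 4KiB pages, PAGE_SHIFT = 12): high = 400 gives
 * batch = 100, which exceeds PAGE_SHIFT * 8 = 96, so the batch is
 * clipped to 96.
 */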
static void setup_zone_pageset(struct zone *zone)
{
	int cpu;

	zone->pageset = alloc_percpu(struct per_cpu_pageset);

	for_each_possible_cpu(cpu) {
		struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);

		setup_pageset(pcp, zone_batchsize(zone));

		if (percpu_pagelist_fraction)
			setup_pagelist_highmark(pcp,
				(zone->present_pages /
					percpu_pagelist_fraction));
	}
}

/*
 * Allocate per cpu pagesets and initialize them.
 * Before this call only boot pagesets were available.
 */
void __init setup_per_cpu_pageset(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		setup_zone_pageset(zone);
}
static noinline __init_refok
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
	int i;
	struct pglist_data *pgdat = zone->zone_pgdat;
	size_t alloc_size;

	/*
	 * The per-page waitqueue mechanism uses hashed waitqueues
	 * per zone.
	 */
	zone->wait_table_hash_nr_entries =
		 wait_table_hash_nr_entries(zone_size_pages);
	zone->wait_table_bits =
		 wait_table_bits(zone->wait_table_hash_nr_entries);
	alloc_size = zone->wait_table_hash_nr_entries
					* sizeof(wait_queue_head_t);

	if (!slab_is_available()) {
		zone->wait_table = (wait_queue_head_t *)
			alloc_bootmem_node_nopanic(pgdat, alloc_size);
	} else {
		/*
		 * This case means that a zone whose size was 0 gets new
		 * memory via memory hot-add.
		 * But it may be the case that a new node was hot-added. In
		 * this case vmalloc() will not be able to use this new
		 * node's memory - this wait_table must be initialized to
		 * use this new node itself as well.
		 * To use this new node's memory, further consideration will
		 * be necessary.
		 */
		zone->wait_table = vmalloc(alloc_size);
	}
	if (!zone->wait_table)
		return -ENOMEM;

	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
		init_waitqueue_head(zone->wait_table + i);

	return 0;
}
static int __zone_pcp_update(void *data)
{
	struct zone *zone = data;
	int cpu;
	unsigned long batch = zone_batchsize(zone), flags;

	for_each_possible_cpu(cpu) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		pset = per_cpu_ptr(zone->pageset, cpu);
		pcp = &pset->pcp;

		local_irq_save(flags);
		free_pcppages_bulk(zone, pcp->count, pcp);
		setup_pageset(pset, batch);
		local_irq_restore(flags);
	}
	return 0;
}

void zone_pcp_update(struct zone *zone)
{
	stop_machine(__zone_pcp_update, zone, NULL);
}

static __meminit void zone_pcp_init(struct zone *zone)
{
	/*
	 * The per cpu subsystem is not up at this point. The following code
	 * relies on the ability of the linker to provide the
	 * offset of a (static) per cpu variable into the per cpu area.
	 */
	zone->pageset = &boot_pageset;

	if (zone->present_pages)
		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
			zone->name, zone->present_pages,
					 zone_batchsize(zone));
}

__meminit int init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size,
					enum memmap_context context)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret;

	ret = zone_wait_table_init(zone, size);
	if (ret)
		return ret;
	pgdat->nr_zones = zone_idx(zone) + 1;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);

	return 0;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 * Architectures may implement their own version, but if add_active_range()
 * was used and there are no special requirements, this is a convenient
 * alternative.
 */
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
		if (start_pfn <= pfn && pfn < end_pfn)
			return nid;
	/* This is a memory hole */
	return -1;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	int nid;

	nid = __early_pfn_to_nid(pfn);
	if (nid >= 0)
		return nid;
	/* just returns 0 */
	return 0;
}

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	int nid;

	nid = __early_pfn_to_nid(pfn);
	if (nid >= 0 && nid != node)
		return false;
	return true;
}
#endif
/**
 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling free_bootmem() manually.
 */
void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, this_nid;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
		start_pfn = min(start_pfn, max_low_pfn);
		end_pfn = min(end_pfn, max_low_pfn);

		if (start_pfn < end_pfn)
			free_bootmem_node(NODE_DATA(this_nid),
					  PFN_PHYS(start_pfn),
					  (end_pfn - start_pfn) << PAGE_SHIFT);
	}
}
int __init add_from_early_node_map(struct range *range, int az,
				   int nr_range, int nid)
{
	unsigned long start_pfn, end_pfn;
	int i;

	/* need to go over early_node_map to find out good range for node */
	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
		nr_range = add_range(range, az, nr_range, start_pfn, end_pfn);
	return nr_range;
}

/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling memory_present() manually.
 */
void __init sparse_memory_present_with_active_regions(int nid)
{
	unsigned long start_pfn, end_pfn;
	int i, this_nid;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
		memory_present(this_nid, start_pfn, end_pfn);
}
/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by an arch calling add_active_range(). If called for a node
 * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
 */
void __meminit get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	unsigned long this_start_pfn, this_end_pfn;
	int i;

	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
		*start_pfn = min(*start_pfn, this_start_pfn);
		*end_pfn = max(*end_pfn, this_end_pfn);
	}

	if (*start_pfn == -1UL)
		*start_pfn = 0;
}
/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonically
 * increasing memory addresses so that the "highest" populated zone is used.
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;
	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}

/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independent of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory
 * addresses.
 */
static void __meminit adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
				*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}
/*
 * Return the number of pages a zone spans in a node, including holes:
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *ignored)
{
	unsigned long node_start_pfn, node_end_pfn;
	unsigned long zone_start_pfn, zone_end_pfn;

	/* Get the start and end of the node and zone */
	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
	adjust_zone_range_for_zone_movable(nid, zone_type,
				node_start_pfn, node_end_pfn,
				&zone_start_pfn, &zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
	zone_start_pfn = max(zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return zone_end_pfn - zone_start_pfn;
}
/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __meminit __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}
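/*
 * Example: for the range [0, 1000) with registered memory at [0, 200)
 * and [500, 1000), nr_absent starts at 1000 and drops by 200 and 500,
 * leaving a 300-page hole.
 */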
/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
static unsigned long __meminit zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *ignored)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
	unsigned long node_start_pfn, node_end_pfn;
	unsigned long zone_start_pfn, zone_end_pfn;

	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);

	adjust_zone_range_for_zone_movable(nid, zone_type,
			node_start_pfn, node_end_pfn,
			&zone_start_pfn, &zone_end_pfn);
	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}
#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *zones_size)
{
	return zones_size[zone_type];
}

static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
						unsigned long zone_type,
						unsigned long *zholes_size)
{
	if (!zholes_size)
		return 0;

	return zholes_size[zone_type];
}

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	unsigned long realtotalpages, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
								zones_size);
	pgdat->node_spanned_pages = totalpages;

	realtotalpages = totalpages;
	for (i = 0; i < MAX_NR_ZONES; i++)
		realtotalpages -=
			zone_absent_pages_in_node(pgdat->node_id, i,
								zholes_size);
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
							realtotalpages);
}
#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long.
 * Start by making sure zonesize is a multiple of pageblock_nr_pages by
 * rounding up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock,
 * finally round what is now in bits to the nearest long in bits, then
 * return it in bytes.
 */
static unsigned long __init usemap_size(unsigned long zonesize)
{
	unsigned long usemapsize;

	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));

	return usemapsize / 8;
}
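/*
 * Worked example (hypothetical numbers, assuming pageblock_order = 9 and
 * 3-bit pageblock flags): a 262144-page zone covers 512 pageblocks, i.e.
 * 512 * 3 = 1536 bits, already a multiple of 64 bits, so the usemap
 * takes 1536 / 8 = 192 bytes.
 */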
static void __init setup_usemap(struct pglist_data *pgdat,
				struct zone *zone, unsigned long zonesize)
{
	unsigned long usemapsize = usemap_size(zonesize);
	zone->pageblock_flags = NULL;
	if (usemapsize)
		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
								   usemapsize);
}
#else
static inline void setup_usemap(struct pglist_data *pgdat,
				struct zone *zone, unsigned long zonesize) {}
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Return a sensible default order for the pageblock size. */
static inline int pageblock_default_order(void)
{
	if (HPAGE_SHIFT > PAGE_SHIFT)
		return HUGETLB_PAGE_ORDER;

	return MAX_ORDER-1;
}

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
static inline void __init set_pageblock_order(unsigned int order)
{
	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64.
	 */
	pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * and pageblock_default_order() are unused as pageblock_order is set
 * at compile-time. See include/linux/pageblock-flags.h for the values of
 * pageblock_order based on the kernel config.
 */
static inline int pageblock_default_order(unsigned int order)
{
	return MAX_ORDER-1;
}
#define set_pageblock_order(x)	do {} while (0)

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
static void __paginginit free_area_init_core(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	enum zone_type j;
	int nid = pgdat->node_id;
	unsigned long zone_start_pfn = pgdat->node_start_pfn;
	int ret;

	pgdat_resize_init(pgdat);
	pgdat->nr_zones = 0;
	init_waitqueue_head(&pgdat->kswapd_wait);
	pgdat->kswapd_max_order = 0;
	pgdat_page_cgroup_init(pgdat);

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, realsize, memmap_pages;
		enum lru_list l;

		size = zone_spanned_pages_in_node(nid, j, zones_size);
		realsize = size - zone_absent_pages_in_node(nid, j,
								zholes_size);

		/*
		 * Adjust realsize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations.
		 */
		memmap_pages =
			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
		if (realsize >= memmap_pages) {
			realsize -= memmap_pages;
			if (memmap_pages)
				printk(KERN_DEBUG
				       "  %s zone: %lu pages used for memmap\n",
				       zone_names[j], memmap_pages);
		} else
			printk(KERN_WARNING
				"  %s zone: %lu pages exceeds realsize %lu\n",
				zone_names[j], memmap_pages, realsize);

		/* Account for reserved pages */
		if (j == 0 && realsize > dma_reserve) {
			realsize -= dma_reserve;
			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
					zone_names[0], dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += realsize;
		nr_all_pages += realsize;

		zone->spanned_pages = size;
		zone->present_pages = realsize;
#ifdef CONFIG_NUMA
		zone->node = nid;
		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
						/ 100;
		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
#endif
		zone->name = zone_names[j];
		spin_lock_init(&zone->lock);
		spin_lock_init(&zone->lru_lock);
		zone_seqlock_init(zone);
		zone->zone_pgdat = pgdat;

		zone_pcp_init(zone);
		for_each_lru(l)
			INIT_LIST_HEAD(&zone->lru[l].list);
		zone->reclaim_stat.recent_rotated[0] = 0;
		zone->reclaim_stat.recent_rotated[1] = 0;
		zone->reclaim_stat.recent_scanned[0] = 0;
		zone->reclaim_stat.recent_scanned[1] = 0;
		zap_zone_vm_stats(zone);
		zone->flags = 0;
		if (!size)
			continue;

		set_pageblock_order(pageblock_default_order());
		setup_usemap(pgdat, zone, size);
		ret = init_currently_empty_zone(zone, zone_start_pfn,
						size, MEMMAP_EARLY);
		BUG_ON(ret);
		memmap_init(size, nid, j, zone_start_pfn);
		zone_start_pfn += size;
	}
}
static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, start, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size = (end - start) * sizeof(struct page);
		map = alloc_remap(pgdat->node_id, size);
		if (!map)
			map = alloc_bootmem_node_nopanic(pgdat, size);
		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
	}
#ifndef CONFIG_NEED_MULTIPLE_NODES
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
	}
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}

void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
		unsigned long node_start_pfn, unsigned long *zholes_size)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	calculate_node_totalpages(pgdat, zones_size, zholes_size);

	alloc_node_mem_map(pgdat);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
		nid, (unsigned long)pgdat,
		(unsigned long)pgdat->node_mem_map);
#endif

	free_area_init_core(pgdat, zones_size, zholes_size);
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP

#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
static void __init setup_nr_node_ids(void)
{
	unsigned int node;
	unsigned int highest = 0;

	for_each_node_mask(node, node_possible_map)
		highest = node;
	nr_node_ids = highest + 1;
}
#else
static inline void setup_nr_node_ids(void)
{
}
#endif

/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * This function should be called after node map is populated and sorted.
 * It calculates the maximum power of two alignment which can distinguish
 * all the nodes.
 *
 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
 * nodes are shifted by 256MiB, 256MiB is indicated instead. Note that if
 * only the last node is shifted, 1GiB is enough and this function will
 * indicate so.
 *
 * This is used to test whether the pfn -> nid mapping of the chosen memory
 * model has fine enough granularity to avoid incorrect mapping for the
 * populated node map.
 *
 * Returns the determined alignment in pfns. 0 if there is no alignment
 * requirement (single node).
 */
unsigned long __init node_map_pfn_alignment(void)
{
	unsigned long accl_mask = 0, last_end = 0;
	unsigned long start, end, mask;
	int last_nid = -1;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
		if (!start || last_nid < 0 || last_nid == nid) {
			last_nid = nid;
			last_end = end;
			continue;
		}

		/*
		 * Start with a mask granular enough to pin-point to the
		 * start pfn and tick off bits one-by-one until it becomes
		 * too coarse to separate the current node from the last.
		 */
		mask = ~((1 << __ffs(start)) - 1);
		while (mask && last_end <= (start & (mask << 1)))
			mask <<= 1;

		/* accumulate all internode masks */
		accl_mask |= mask;
	}

	/* convert mask to number of pages */
	return ~accl_mask + 1;
}
/* Find the lowest pfn for a node */
static unsigned long __init find_min_pfn_for_node(int nid)
{
	unsigned long min_pfn = ULONG_MAX;
	unsigned long start_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
		min_pfn = min(min_pfn, start_pfn);

	if (min_pfn == ULONG_MAX) {
		printk(KERN_WARNING
			"Could not find start_pfn for node %d\n", nid);
		return 0;
	}

	return min_pfn;
}

/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * add_active_range().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
{
	return find_min_pfn_for_node(MAX_NUMNODES);
}

/*
 * early_calculate_totalpages()
 * Sum pages in active regions for the movable zone.
 * Populate N_HIGH_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_HIGH_MEMORY);
	}
	return totalpages;
}
  3799. /*
  3800. * Find the PFN the Movable zone begins in each node. Kernel memory
  3801. * is spread evenly between nodes as long as the nodes have enough
  3802. * memory. When they don't, some nodes will have more kernelcore than
  3803. * others
  3804. */
  3805. static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
  3806. {
  3807. int i, nid;
  3808. unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrowing the nodemask */
	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);

	/*
	 * If movablecore was specified, calculate the size of
	 * kernelcore that corresponds to it so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round up so that ZONE_MOVABLE is at least as large as
		 * what was requested by the user.
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		corepages = totalpages - required_movablecore;
		required_kernelcore = max(required_kernelcore, corepages);
	}

	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
	if (!required_kernelcore)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	find_usable_zone_for_movable();
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel.
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE.
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			unsigned long size_pages;

			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;
				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {
					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here.
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore.
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met; update counts and
			 * break if the kernelcore for this node has been
			 * satisfied.
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied.
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

out:
	/* restore the node_state */
	node_states[N_HIGH_MEMORY] = saved_node_state;
}
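/*
 * Illustrative walk-through (hypothetical two-node layout, numbers not
 * taken from any real machine): with kernelcore=1G over two nodes, the
 * first pass asks each node for 512M.  If node 0 spans only 256M, it
 * contributes all of it and its zone_movable_pfn is pushed to the node
 * end.  The remaining 256M of required_kernelcore triggers the
 * "restart" pass with usable_nodes reduced to 1; node 1's
 * zone_movable_pfn then advances another 256M, leaving roughly 768M of
 * node 1 as kernelcore and the rest as ZONE_MOVABLE.
 */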
/* Any regular memory on that node? */
static void check_for_regular_memory(pg_data_t *pgdat)
{
#ifdef CONFIG_HIGHMEM
	enum zone_type zone_type;

	for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];
		if (zone->present_pages)
			node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
	}
#endif
}
/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by add_active_range(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFNs
 * of two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));
	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
	for (i = 1; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		arch_zone_lowest_possible_pfn[i] =
			arch_zone_highest_possible_pfn[i-1];
		arch_zone_highest_possible_pfn[i] =
			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
	}
	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;

	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
	find_zone_movable_pfns_for_nodes(zone_movable_pfn);

	/* Print out the zone ranges */
	printk("Zone PFN ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		printk("  %-8s ", zone_names[i]);
		if (arch_zone_lowest_possible_pfn[i] ==
				arch_zone_highest_possible_pfn[i])
			printk("empty\n");
		else
			printk("%0#10lx -> %0#10lx\n",
				arch_zone_lowest_possible_pfn[i],
				arch_zone_highest_possible_pfn[i]);
	}

	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
	printk("Movable zone start PFN for each node\n");
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (zone_movable_pfn[i])
			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
	}

	/* Print out the early_node_map[] */
	printk("Early memory PFN ranges\n");
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
		printk("  %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);

	/* Initialise every node */
	mminit_verify_pageflags_layout();
	setup_nr_node_ids();
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		free_area_init_node(nid, NULL,
				find_min_pfn_for_node(nid), NULL);

		/* Any memory on that node? */
		if (pgdat->node_present_pages)
			node_set_state(nid, N_HIGH_MEMORY);
		check_for_regular_memory(pgdat);
	}
}
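/*
 * Worked example (hypothetical x86-64-style values, 4K pages): with
 * max_zone_pfn = { DMA: 0x1000, DMA32: 0x100000, NORMAL: 0x440000 },
 * the boundary loop above yields
 *	DMA	min_pfn  -> 0x001000
 *	DMA32	0x001000 -> 0x100000
 *	NORMAL	0x100000 -> 0x440000
 * i.e. each zone starts where the previous one ended, and a zone whose
 * low and high PFNs are equal is reported as "empty".
 */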
static int __init cmdline_parse_core(char *p, unsigned long *core)
{
	unsigned long long coremem;
	if (!p)
		return -EINVAL;
	coremem = memparse(p, &p);
	*core = coremem >> PAGE_SHIFT;

	/* Paranoid check that UL is enough for the coremem value */
	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

	return 0;
}

/*
 * kernelcore=size sets the amount of memory reserved for allocations
 * that cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	return cmdline_parse_core(p, &required_kernelcore);
}

/*
 * movablecore=size sets the amount of memory reserved for allocations
 * that can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore);
}

early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);
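/*
 * Parsing sketch (illustrative, assumes 4K pages): "kernelcore=512M"
 * makes memparse() return 536870912, which cmdline_parse_core() shifts
 * right by PAGE_SHIFT (12) to store 131072 pages in required_kernelcore.
 */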
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 * In the DMA zone, a significant percentage may be consumed by the kernel
 * image and other unfreeable allocations, which can skew the watermarks
 * badly. This function may optionally be used to account for unfreeable
 * pages in the first zone (e.g., ZONE_DMA). The effect will be lower
 * watermarks and a smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}

void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}
static int page_alloc_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		drain_pages(cpu);

		/*
		 * Spill the event counters of the dead processor
		 * into the current processor's event counters.
		 * This artificially elevates the count of the current
		 * processor.
		 */
		vm_events_fold_cpu(cpu);

		/*
		 * Zero the differential counters of the dead processor
		 * so that the vm statistics are consistent.
		 *
		 * This is only okay since the processor is dead and cannot
		 * race with what we are doing.
		 */
		refresh_cpu_vm_stats(cpu);
	}
	return NOTIFY_OK;
}

void __init page_alloc_init(void)
{
	hotcpu_notifier(page_alloc_cpu_notify, 0);
}
/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 * or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			unsigned long max = 0;

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > zone->present_pages)
				max = zone->present_pages;
			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
}
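/*
 * Worked example (hypothetical zone): if the largest lowmem_reserve[]
 * entry is 4096 pages and high_wmark_pages() is 512, the zone
 * contributes min(4096 + 512, present_pages) to totalreserve_pages.
 */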
/*
 * setup_per_zone_lowmem_reserve - called whenever
 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
 * has a correct pages reserved value, so an adequate number of
 * pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type j, idx;

	for_each_online_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long present_pages = zone->present_pages;

			zone->lowmem_reserve[j] = 0;

			idx = j;
			while (idx) {
				struct zone *lower_zone;

				idx--;

				if (sysctl_lowmem_reserve_ratio[idx] < 1)
					sysctl_lowmem_reserve_ratio[idx] = 1;

				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = present_pages /
					sysctl_lowmem_reserve_ratio[idx];
				present_pages += lower_zone->present_pages;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
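/*
 * Worked example (hypothetical node): with ZONE_NORMAL = 1048576 pages
 * and sysctl_lowmem_reserve_ratio[ZONE_DMA] = 256, the inner loop sets
 *	DMA->lowmem_reserve[NORMAL] = 1048576 / 256 = 4096 pages,
 * i.e. an allocation that could have used ZONE_NORMAL must leave an
 * extra 4096 free pages in ZONE_DMA before it may fall back to it.
 */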
/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->present_pages;
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone->present_pages;
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and WMARK_LOW-WMARK_MIN
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
			int min_pages;

			min_pages = zone->present_pages / 1024;
			if (min_pages < SWAP_CLUSTER_MAX)
				min_pages = SWAP_CLUSTER_MAX;
			if (min_pages > 128)
				min_pages = 128;
			zone->watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->watermark[WMARK_MIN] = tmp;
		}

		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
		setup_zone_migrate_reserve(zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
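/*
 * Worked example (hypothetical, 4K pages): min_free_kbytes = 4096 gives
 * pages_min = 4096 >> (12 - 10) = 1024 pages.  A lowmem zone holding
 * half of all lowmem gets tmp = 512, so
 *	WMARK_MIN  = 512
 *	WMARK_LOW  = 512 + 512/4 = 640
 *	WMARK_HIGH = 512 + 512/2 = 768
 * kswapd is woken when free pages drop below WMARK_LOW and goes back
 * to sleep once they climb above WMARK_HIGH.
 */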
/*
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 * INACTIVE_ANON pages on this zone's LRU, maintained by the
 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
 * the anonymous pages are kept on the inactive list.
 *
 *  total     target    max
 *  memory    ratio     inactive anon
 * -------------------------------------
 *    10MB       1         5MB
 *   100MB       1        50MB
 *     1GB       3       250MB
 *    10GB      10       0.9GB
 *   100GB      31         3GB
 *     1TB     101        10GB
 *    10TB     320        32GB
 */
static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
{
	unsigned int gb, ratio;

	/* Zone size in gigabytes */
	gb = zone->present_pages >> (30 - PAGE_SHIFT);
	if (gb)
		ratio = int_sqrt(10 * gb);
	else
		ratio = 1;

	zone->inactive_ratio = ratio;
}
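/*
 * Worked example (hypothetical 16GB zone, 4K pages): gb = 16, so
 * ratio = int_sqrt(10 * 16) = int_sqrt(160) = 12, meaning roughly
 * 1/(12+1) of the anonymous pages are kept on the inactive list.
 */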
static void __meminit setup_per_zone_inactive_ratio(void)
{
	struct zone *zone;

	for_each_zone(zone)
		calculate_zone_inactive_ratio(zone);
}
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (64MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 *    16MB:	  512k
 *    32MB:	  724k
 *    64MB:	 1024k
 *   128MB:	 1448k
 *   256MB:	 2048k
 *   512MB:	 2896k
 *  1024MB:	 4096k
 *  2048MB:	 5792k
 *  4096MB:	 8192k
 *  8192MB:	11584k
 * 16384MB:	16384k
 */
int __meminit init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
	if (min_free_kbytes < 128)
		min_free_kbytes = 128;
	if (min_free_kbytes > 65536)
		min_free_kbytes = 65536;
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();
	setup_per_zone_inactive_ratio();
	return 0;
}
module_init(init_per_zone_wmark_min)
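/*
 * Checking one row of the table above (4K pages): 512MB of lowmem is
 * lowmem_kbytes = 524288, and int_sqrt(524288 * 16) = int_sqrt(8388608)
 * = 2896, i.e. min_free_kbytes = 2896k as listed.
 */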
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 * that we can call two helper functions whenever min_free_kbytes
 * changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	if (write)
		setup_per_zone_wmarks();
	return 0;
}
#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->present_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_slab_pages = (zone->present_pages *
				sysctl_min_slab_ratio) / 100;
	return 0;
}
#endif
/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 * whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the minimum watermarks; it can
 * only make sense as a function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes pcp->high for each zone on each CPU.
 * It is the fraction of a zone's total pages that a hot per-cpu pagelist
 * may hold before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	unsigned int cpu;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || (ret == -EINVAL))
		return ret;
	for_each_populated_zone(zone) {
		for_each_possible_cpu(cpu) {
			unsigned long high;
			high = zone->present_pages / percpu_pagelist_fraction;
			setup_pagelist_highmark(
				per_cpu_ptr(zone->pageset, cpu), high);
		}
	}
	return 0;
}
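/*
 * Worked example (hypothetical zone of 1048576 pages): writing 8 to
 * percpu_pagelist_fraction sets pcp->high to 1048576 / 8 = 131072
 * pages on every CPU's pagelist for that zone.
 */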
int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long limit)
{
	unsigned long long max = limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
		numentries >>= 20 - PAGE_SHIFT;
		numentries <<= 20 - PAGE_SHIFT;

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}

	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = alloc_bootmem_nopanic(size);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			/*
			 * If bucketsize is not a power of two, we may free
			 * some pages at the end of the hash table, which
			 * alloc_pages_exact() automatically does.
			 */
			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
	       tablename,
	       (1UL << log2qty),
	       ilog2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
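/*
 * Sizing sketch (hypothetical 1GB machine, 4K pages, no explicit
 * numentries or limit): nr_kernel_pages = 262144 is already a whole
 * number of megabytes, so the rounding leaves it unchanged.  With
 * scale = 14 (one bucket per 16KB of lowmem), numentries >>= 14 - 12
 * gives 65536 entries; with 16-byte buckets the table is
 * 65536 * 16 = 1MB, well under the default cap of 1/16 of memory.
 */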
/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - zone->zone_start_pfn;
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
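/*
 * Index sketch (hypothetical SPARSEMEM config with pageblock_order = 9
 * and NR_PAGEBLOCK_BITS = 4): a pfn 0x2600 into its section lies in
 * pageblock 0x2600 >> 9 = 19, so its flags start at bit 19 * 4 = 76
 * of the section's pageblock_flags bitmap.
 */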
/**
 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest to retrieve
 * @end_bitidx: The last bit of interest
 * returns pageblock_bits flags
 */
unsigned long get_pageblock_flags_group(struct page *page,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long flags = 0;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (test_bit(bitidx + start_bitidx, bitmap))
			flags |= value;

	return flags;
}

/**
 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest
 * @end_bitidx: The last bit of interest
 * @flags: The flags to set
 */
void set_pageblock_flags_group(struct page *page, unsigned long flags,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	VM_BUG_ON(pfn < zone->zone_start_pfn);
	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (flags & value)
			__set_bit(bitidx + start_bitidx, bitmap);
		else
			__clear_bit(bitidx + start_bitidx, bitmap);
}
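/*
 * Usage sketch (illustrative; the real wrappers live in the
 * pageblock-flags header, not here): the migratetype accessors are
 * thin shims over the two functions above, roughly
 *
 *	mt = get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
 *	set_pageblock_flags_group(page, MIGRATE_MOVABLE,
 *					PB_migrate, PB_migrate_end);
 */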
/*
 * Helpers for page isolation; see also mm/page_isolation.c.
 * A pageblock's migratetype can be set to or cleared from MIGRATE_ISOLATE;
 * the page allocator never allocates from an ISOLATE block.
 */
static int
__count_immobile_pages(struct zone *zone, struct page *page, int count)
{
	unsigned long pfn, iter, found;
	/*
	 * To avoid noisy data, lru_add_drain_all() should be called first.
	 * If the zone is ZONE_MOVABLE, it never contains immobile pages.
	 */
	if (zone_idx(zone) == ZONE_MOVABLE)
		return true;

	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
		return true;

	pfn = page_to_pfn(page);
	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
		unsigned long check = pfn + iter;

		if (!pfn_valid_within(check))
			continue;

		page = pfn_to_page(check);
		if (!page_count(page)) {
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
			continue;
		}
		if (!PageLRU(page))
			found++;
		/*
		 * If there are RECLAIMABLE pages, we need to check them.
		 * But for now, memory offline itself doesn't call
		 * shrink_slab(), and that still needs to be fixed.
		 */
		/*
		 * If the page is not RAM, page_count() should be 0.
		 * We don't need any further check: this is a _used_
		 * not-movable page.
		 *
		 * The problematic thing here is PG_reserved pages. PG_reserved
		 * is set on both memory hole pages and _used_ kernel
		 * pages at boot.
		 */
		if (found > count)
			return false;
	}
	return true;
}

bool is_pageblock_removable_nolock(struct page *page)
{
	struct zone *zone = page_zone(page);
	return __count_immobile_pages(zone, page, 0);
}
int set_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when a memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (__count_immobile_pages(zone, page, arg.pages_found))
		ret = 0;

	/*
	 * Immobile means "not-on-LRU" pages. If the immobile count is
	 * larger than the removable-by-driver pages reported by the
	 * notifier, we'll fail.
	 */

out:
	if (!ret) {
		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		move_freepages_block(zone, page, MIGRATE_ISOLATE);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}

void unset_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags;
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
	move_freepages_block(zone, page, MIGRATE_MOVABLE);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	int order, i;
	unsigned long pfn;
	unsigned long flags;
	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		printk(KERN_INFO "remove from free list %lx %d %lx\n",
		       pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES,
				      - (1UL << order));
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif
#ifdef CONFIG_MEMORY_FAILURE
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
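/*
 * Search sketch (illustrative numbers): for pfn 0x1234 at order 2 the
 * loop checks page_head = page - (0x1234 & 3) = page - 0, and at
 * order 4 it checks page - (0x1234 & 0xf) = page - 4, walking each
 * possible buddy-head alignment until one of them is a free buddy
 * page of at least that order.
 */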
#endif
static struct trace_print_flags pageflag_names[] = {
	{1UL << PG_locked,		"locked"	},
	{1UL << PG_error,		"error"		},
	{1UL << PG_referenced,		"referenced"	},
	{1UL << PG_uptodate,		"uptodate"	},
	{1UL << PG_dirty,		"dirty"		},
	{1UL << PG_lru,			"lru"		},
	{1UL << PG_active,		"active"	},
	{1UL << PG_slab,		"slab"		},
	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
	{1UL << PG_arch_1,		"arch_1"	},
	{1UL << PG_reserved,		"reserved"	},
	{1UL << PG_private,		"private"	},
	{1UL << PG_private_2,		"private_2"	},
	{1UL << PG_writeback,		"writeback"	},
#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{1UL << PG_head,		"head"		},
	{1UL << PG_tail,		"tail"		},
#else
	{1UL << PG_compound,		"compound"	},
#endif
	{1UL << PG_swapcache,		"swapcache"	},
	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
	{1UL << PG_reclaim,		"reclaim"	},
	{1UL << PG_swapbacked,		"swapbacked"	},
	{1UL << PG_unevictable,		"unevictable"	},
#ifdef CONFIG_MMU
	{1UL << PG_mlocked,		"mlocked"	},
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	{1UL << PG_uncached,		"uncached"	},
#endif
#ifdef CONFIG_MEMORY_FAILURE
	{1UL << PG_hwpoison,		"hwpoison"	},
#endif
	{-1UL,				NULL		},
};
static void dump_page_flags(unsigned long flags)
{
	const char *delim = "";
	unsigned long mask;
	int i;

	printk(KERN_ALERT "page flags: %#lx(", flags);

	/* remove zone id */
	flags &= (1UL << NR_PAGEFLAGS) - 1;

	for (i = 0; pageflag_names[i].name && flags; i++) {

		mask = pageflag_names[i].mask;
		if ((flags & mask) != mask)
			continue;

		flags &= ~mask;
		printk("%s%s", delim, pageflag_names[i].name);
		delim = "|";
	}

	/* check for leftover flags */
	if (flags)
		printk("%s%#lx", delim, flags);

	printk(")\n");
}

void dump_page(struct page *page)
{
	printk(KERN_ALERT
	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
		page, atomic_read(&page->_count), page_mapcount(page),
		page->mapping, page->index);
	dump_page_flags(page->flags);
	mem_cgroup_print_bad_page(page);
}