page_alloc.c

  1. /*
  2. * linux/mm/page_alloc.c
  3. *
  4. * Manages the free list; the system allocates free pages here.
  5. * Note that kmalloc() lives in slab.c
  6. *
  7. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  8. * Swap reorganised 29.12.95, Stephen Tweedie
  9. * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  10. * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  11. * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  12. * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  13. * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  14. * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  15. */
  16. #include <linux/stddef.h>
  17. #include <linux/mm.h>
  18. #include <linux/swap.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/pagemap.h>
  21. #include <linux/bootmem.h>
  22. #include <linux/compiler.h>
  23. #include <linux/kernel.h>
  24. #include <linux/module.h>
  25. #include <linux/suspend.h>
  26. #include <linux/pagevec.h>
  27. #include <linux/blkdev.h>
  28. #include <linux/slab.h>
  29. #include <linux/notifier.h>
  30. #include <linux/topology.h>
  31. #include <linux/sysctl.h>
  32. #include <linux/cpu.h>
  33. #include <linux/cpuset.h>
  34. #include <linux/memory_hotplug.h>
  35. #include <linux/nodemask.h>
  36. #include <linux/vmalloc.h>
  37. #include <linux/mempolicy.h>
  38. #include <linux/stop_machine.h>
  39. #include <linux/sort.h>
  40. #include <linux/pfn.h>
  41. #include <linux/backing-dev.h>
  42. #include <linux/fault-inject.h>
  43. #include <asm/tlbflush.h>
  44. #include <asm/div64.h>
  45. #include "internal.h"
  46. /*
  47. * Array of node states.
  48. */
  49. nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
  50. [N_POSSIBLE] = NODE_MASK_ALL,
  51. [N_ONLINE] = { { [0] = 1UL } },
  52. #ifndef CONFIG_NUMA
  53. [N_NORMAL_MEMORY] = { { [0] = 1UL } },
  54. #ifdef CONFIG_HIGHMEM
  55. [N_HIGH_MEMORY] = { { [0] = 1UL } },
  56. #endif
  57. [N_CPU] = { { [0] = 1UL } },
  58. #endif /* NUMA */
  59. };
  60. EXPORT_SYMBOL(node_states);
  61. unsigned long totalram_pages __read_mostly;
  62. unsigned long totalreserve_pages __read_mostly;
  63. long nr_swap_pages;
  64. int percpu_pagelist_fraction;
  65. static void __free_pages_ok(struct page *page, unsigned int order);
  66. /*
  67. * results with 256, 32 in the lowmem_reserve sysctl:
  68. * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
  69. * 1G machine -> (16M dma, 784M normal, 224M high)
  70. * NORMAL allocation will leave 784M/256 of ram reserved in ZONE_DMA
  71. * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
  72. * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
  73. *
  74. * TBD: should special case ZONE_DMA32 machines here - in those we normally
  75. * don't need any ZONE_NORMAL reservation
  76. */
  77. int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
  78. #ifdef CONFIG_ZONE_DMA
  79. 256,
  80. #endif
  81. #ifdef CONFIG_ZONE_DMA32
  82. 256,
  83. #endif
  84. #ifdef CONFIG_HIGHMEM
  85. 32,
  86. #endif
  87. 32,
  88. };
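/*
 * Worked example (illustration only, not part of the original file): for the
 * 1G machine above with the default ratios 256 and 32, a NORMAL allocation
 * leaves roughly 784M/256 ~= 3M of ZONE_DMA untouched, while a HIGHMEM
 * allocation leaves 224M/32 = 7M of ZONE_NORMAL and (224M+784M)/256 ~= 4M
 * of ZONE_DMA in reserve.
 */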
  89. EXPORT_SYMBOL(totalram_pages);
  90. static char * const zone_names[MAX_NR_ZONES] = {
  91. #ifdef CONFIG_ZONE_DMA
  92. "DMA",
  93. #endif
  94. #ifdef CONFIG_ZONE_DMA32
  95. "DMA32",
  96. #endif
  97. "Normal",
  98. #ifdef CONFIG_HIGHMEM
  99. "HighMem",
  100. #endif
  101. "Movable",
  102. };
  103. int min_free_kbytes = 1024;
  104. unsigned long __meminitdata nr_kernel_pages;
  105. unsigned long __meminitdata nr_all_pages;
  106. static unsigned long __meminitdata dma_reserve;
  107. #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  108. /*
  110. * MAX_ACTIVE_REGIONS determines the maximum number of distinct
  110. * ranges of memory (RAM) that may be registered with add_active_range().
  111. * Ranges passed to add_active_range() will be merged if possible
  112. * so the number of times add_active_range() can be called is
  113. * related to the number of nodes and the number of holes
  114. */
  115. #ifdef CONFIG_MAX_ACTIVE_REGIONS
  116. /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
  117. #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  118. #else
  119. #if MAX_NUMNODES >= 32
  120. /* If there can be many nodes, allow up to 50 holes per node */
  121. #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
  122. #else
  123. /* By default, allow up to 256 distinct regions */
  124. #define MAX_ACTIVE_REGIONS 256
  125. #endif
  126. #endif
  127. static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  128. static int __meminitdata nr_nodemap_entries;
  129. static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  130. static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  131. #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  132. static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
  133. static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
  134. #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
  135. unsigned long __initdata required_kernelcore;
  136. unsigned long __initdata required_movablecore;
  137. unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
  138. /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  139. int movable_zone;
  140. EXPORT_SYMBOL(movable_zone);
  141. #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
  142. #if MAX_NUMNODES > 1
  143. int nr_node_ids __read_mostly = MAX_NUMNODES;
  144. EXPORT_SYMBOL(nr_node_ids);
  145. #endif
  146. #ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
  147. int page_group_by_mobility_disabled __read_mostly;
  148. static inline int get_pageblock_migratetype(struct page *page)
  149. {
  150. if (unlikely(page_group_by_mobility_disabled))
  151. return MIGRATE_UNMOVABLE;
  152. return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
  153. }
  154. static void set_pageblock_migratetype(struct page *page, int migratetype)
  155. {
  156. set_pageblock_flags_group(page, (unsigned long)migratetype,
  157. PB_migrate, PB_migrate_end);
  158. }
  159. static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
  160. {
  161. WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
  162. if (unlikely(page_group_by_mobility_disabled))
  163. return MIGRATE_UNMOVABLE;
  164. /* Cluster high-order atomic allocations together */
  165. if (unlikely(order > 0) &&
  166. (!(gfp_flags & __GFP_WAIT) || in_interrupt()))
  167. return MIGRATE_HIGHATOMIC;
  168. /* Cluster based on mobility */
  169. return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
  170. ((gfp_flags & __GFP_RECLAIMABLE) != 0);
  171. }
  172. #else
  173. static inline int get_pageblock_migratetype(struct page *page)
  174. {
  175. return MIGRATE_UNMOVABLE;
  176. }
  177. static void set_pageblock_migratetype(struct page *page, int migratetype)
  178. {
  179. }
  180. static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
  181. {
  182. return MIGRATE_UNMOVABLE;
  183. }
  184. #endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
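/*
 * Worked example (illustration only, not part of the original file): with
 * mobility grouping enabled, allocflags_to_migratetype() above maps a
 * GFP_HIGHUSER_MOVABLE request (__GFP_MOVABLE set, __GFP_RECLAIMABLE clear)
 * to (1 << 1) | 0 == MIGRATE_MOVABLE, a plain GFP_KERNEL request to
 * MIGRATE_UNMOVABLE, and an order > 0 GFP_ATOMIC request (no __GFP_WAIT)
 * to MIGRATE_HIGHATOMIC.
 */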
  185. #ifdef CONFIG_DEBUG_VM
  186. static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
  187. {
  188. int ret = 0;
  189. unsigned seq;
  190. unsigned long pfn = page_to_pfn(page);
  191. do {
  192. seq = zone_span_seqbegin(zone);
  193. if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
  194. ret = 1;
  195. else if (pfn < zone->zone_start_pfn)
  196. ret = 1;
  197. } while (zone_span_seqretry(zone, seq));
  198. return ret;
  199. }
  200. static int page_is_consistent(struct zone *zone, struct page *page)
  201. {
  202. if (!pfn_valid_within(page_to_pfn(page)))
  203. return 0;
  204. if (zone != page_zone(page))
  205. return 0;
  206. return 1;
  207. }
  208. /*
  209. * Temporary debugging check for pages not lying within a given zone.
  210. */
  211. static int bad_range(struct zone *zone, struct page *page)
  212. {
  213. if (page_outside_zone_boundaries(zone, page))
  214. return 1;
  215. if (!page_is_consistent(zone, page))
  216. return 1;
  217. return 0;
  218. }
  219. #else
  220. static inline int bad_range(struct zone *zone, struct page *page)
  221. {
  222. return 0;
  223. }
  224. #endif
  225. static void bad_page(struct page *page)
  226. {
  227. printk(KERN_EMERG "Bad page state in process '%s'\n"
  228. KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
  229. KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
  230. KERN_EMERG "Backtrace:\n",
  231. current->comm, page, (int)(2*sizeof(unsigned long)),
  232. (unsigned long)page->flags, page->mapping,
  233. page_mapcount(page), page_count(page));
  234. dump_stack();
  235. page->flags &= ~(1 << PG_lru |
  236. 1 << PG_private |
  237. 1 << PG_locked |
  238. 1 << PG_active |
  239. 1 << PG_dirty |
  240. 1 << PG_reclaim |
  241. 1 << PG_slab |
  242. 1 << PG_swapcache |
  243. 1 << PG_writeback |
  244. 1 << PG_buddy );
  245. set_page_count(page, 0);
  246. reset_page_mapcount(page);
  247. page->mapping = NULL;
  248. add_taint(TAINT_BAD_PAGE);
  249. }
  250. /*
  251. * Higher-order pages are called "compound pages". They are structured thusly:
  252. *
  253. * The first PAGE_SIZE page is called the "head page".
  254. *
  255. * The remaining PAGE_SIZE pages are called "tail pages".
  256. *
  257. * All pages have PG_compound set. All pages have their ->private pointing at
  258. * the head page (even the head page has this).
  259. *
  260. * The first tail page's ->lru.next holds the address of the compound page's
  261. * put_page() function. Its ->lru.prev holds the order of allocation.
  262. * This usage means that zero-order pages may not be compound.
  263. */
  264. static void free_compound_page(struct page *page)
  265. {
  266. __free_pages_ok(page, compound_order(page));
  267. }
  268. static void prep_compound_page(struct page *page, unsigned long order)
  269. {
  270. int i;
  271. int nr_pages = 1 << order;
  272. set_compound_page_dtor(page, free_compound_page);
  273. set_compound_order(page, order);
  274. __SetPageHead(page);
  275. for (i = 1; i < nr_pages; i++) {
  276. struct page *p = page + i;
  277. __SetPageTail(p);
  278. p->first_page = page;
  279. }
  280. }
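/*
 * Worked example (illustration only): after prep_compound_page(page, 2),
 * page[0] is marked as the compound head with order 2, and page[1..3] are
 * marked as tail pages whose ->first_page points back at page[0]; this is
 * exactly the state that destroy_compound_page() below checks and undoes
 * when the block is freed.
 */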
  281. static void destroy_compound_page(struct page *page, unsigned long order)
  282. {
  283. int i;
  284. int nr_pages = 1 << order;
  285. if (unlikely(compound_order(page) != order))
  286. bad_page(page);
  287. if (unlikely(!PageHead(page)))
  288. bad_page(page);
  289. __ClearPageHead(page);
  290. for (i = 1; i < nr_pages; i++) {
  291. struct page *p = page + i;
  292. if (unlikely(!PageTail(p) |
  293. (p->first_page != page)))
  294. bad_page(page);
  295. __ClearPageTail(p);
  296. }
  297. }
  298. static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
  299. {
  300. int i;
  301. VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
  302. /*
  303. * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
  304. * and __GFP_HIGHMEM from hard or soft interrupt context.
  305. */
  306. VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
  307. for (i = 0; i < (1 << order); i++)
  308. clear_highpage(page + i);
  309. }
  310. /*
  311. * Functions for dealing with a page's order in the buddy system.
  312. * zone->lock is already acquired when we use these.
  313. * So, we don't need atomic page->flags operations here.
  314. */
  315. static inline unsigned long page_order(struct page *page)
  316. {
  317. return page_private(page);
  318. }
  319. static inline void set_page_order(struct page *page, int order)
  320. {
  321. set_page_private(page, order);
  322. __SetPageBuddy(page);
  323. }
  324. static inline void rmv_page_order(struct page *page)
  325. {
  326. __ClearPageBuddy(page);
  327. set_page_private(page, 0);
  328. }
  329. /*
  330. * Locate the struct page for both the matching buddy in our
  331. * pair (buddy1) and the combined order O+1 page they form (page).
  332. *
  333. * 1) Any buddy B1 will have an order O twin B2 which satisfies
  334. * the following equation:
  335. * B2 = B1 ^ (1 << O)
  336. * For example, if the starting buddy (B1) is #8 its order
  337. * 1 buddy is #10:
  338. * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
  339. *
  340. * 2) Any buddy B will have an order O+1 parent P which
  341. * satisfies the following equation:
  342. * P = B & ~(1 << O)
  343. *
  344. * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
  345. */
  346. static inline struct page *
  347. __page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
  348. {
  349. unsigned long buddy_idx = page_idx ^ (1 << order);
  350. return page + (buddy_idx - page_idx);
  351. }
  352. static inline unsigned long
  353. __find_combined_index(unsigned long page_idx, unsigned int order)
  354. {
  355. return (page_idx & ~(1 << order));
  356. }
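/*
 * Worked example (illustration only): for page_idx 10 at order 1,
 * __page_find_buddy() computes buddy_idx = 10 ^ (1 << 1) = 8, and
 * __find_combined_index() returns 10 & ~(1 << 1) = 8, the index of the
 * combined order-2 block, matching the P = B & ~(1 << O) relation in the
 * comment above.
 */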
  357. /*
  358. * This function checks whether a page is free && is the buddy
  359. * we can coalesce with. A page and its buddy can be coalesced if
  360. * (a) the buddy is not in a hole &&
  361. * (b) the buddy is in the buddy system &&
  362. * (c) a page and its buddy have the same order &&
  363. * (d) a page and its buddy are in the same zone.
  364. *
  365. * For recording whether a page is in the buddy system, we use PG_buddy.
  366. * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
  367. *
  368. * For recording page's order, we use page_private(page).
  369. */
  370. static inline int page_is_buddy(struct page *page, struct page *buddy,
  371. int order)
  372. {
  373. if (!pfn_valid_within(page_to_pfn(buddy)))
  374. return 0;
  375. if (page_zone_id(page) != page_zone_id(buddy))
  376. return 0;
  377. if (PageBuddy(buddy) && page_order(buddy) == order) {
  378. BUG_ON(page_count(buddy) != 0);
  379. return 1;
  380. }
  381. return 0;
  382. }
  383. /*
  384. * Freeing function for a buddy system allocator.
  385. *
  386. * The concept of a buddy system is to maintain direct-mapped table
  387. * (containing bit values) for memory blocks of various "orders".
  388. * The bottom level table contains the map for the smallest allocatable
  389. * units of memory (here, pages), and each level above it describes
  390. * pairs of units from the levels below, hence, "buddies".
  391. * At a high level, all that happens here is marking the table entry
  392. * at the bottom level available, and propagating the changes upward
  393. * as necessary, plus some accounting needed to play nicely with other
  394. * parts of the VM system.
  395. * At each level, we keep a list of pages, which are heads of contiguous
  396. * runs of free pages of length (1 << order), marked with PG_buddy. The
  397. * page's order is recorded in the page_private(page) field.
  398. * So when we are allocating or freeing one, we can derive the state of the
  399. * other. That is, if we allocate a small block, and both were
  400. * free, the remainder of the region must be split into blocks.
  401. * If a block is freed, and its buddy is also free, then this
  402. * triggers coalescing into a block of larger size.
  403. *
  404. * -- wli
  405. */
  406. static inline void __free_one_page(struct page *page,
  407. struct zone *zone, unsigned int order)
  408. {
  409. unsigned long page_idx;
  410. int order_size = 1 << order;
  411. int migratetype = get_pageblock_migratetype(page);
  412. if (unlikely(PageCompound(page)))
  413. destroy_compound_page(page, order);
  414. page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
  415. VM_BUG_ON(page_idx & (order_size - 1));
  416. VM_BUG_ON(bad_range(zone, page));
  417. __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
  418. while (order < MAX_ORDER-1) {
  419. unsigned long combined_idx;
  420. struct page *buddy;
  421. buddy = __page_find_buddy(page, page_idx, order);
  422. if (!page_is_buddy(page, buddy, order))
  423. break; /* Move the buddy up one level. */
  424. list_del(&buddy->lru);
  425. zone->free_area[order].nr_free--;
  426. rmv_page_order(buddy);
  427. combined_idx = __find_combined_index(page_idx, order);
  428. page = page + (combined_idx - page_idx);
  429. page_idx = combined_idx;
  430. order++;
  431. }
  432. set_page_order(page, order);
  433. list_add(&page->lru,
  434. &zone->free_area[order].free_list[migratetype]);
  435. zone->free_area[order].nr_free++;
  436. }
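/*
 * Worked example (illustration only): freeing the order-0 page at index 9
 * while its buddy at index 8 is free merges them into an order-1 block at
 * index 8; if the order-1 block at index 10 is also free, the loop above
 * merges again into an order-2 block at index 8, and so on until a buddy
 * is found busy or order MAX_ORDER-1 is reached.
 */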
  437. static inline int free_pages_check(struct page *page)
  438. {
  439. if (unlikely(page_mapcount(page) |
  440. (page->mapping != NULL) |
  441. (page_count(page) != 0) |
  442. (page->flags & (
  443. 1 << PG_lru |
  444. 1 << PG_private |
  445. 1 << PG_locked |
  446. 1 << PG_active |
  447. 1 << PG_slab |
  448. 1 << PG_swapcache |
  449. 1 << PG_writeback |
  450. 1 << PG_reserved |
  451. 1 << PG_buddy ))))
  452. bad_page(page);
  453. if (PageDirty(page))
  454. __ClearPageDirty(page);
  455. /*
  456. * For now, we report if PG_reserved was found set, but do not
  457. * clear it, and do not free the page. But we shall soon need
  458. * to do more, for when the ZERO_PAGE count wraps negative.
  459. */
  460. return PageReserved(page);
  461. }
  462. /*
  463. * Frees a list of pages.
  464. * Assumes all pages on list are in same zone, and of same order.
  465. * count is the number of pages to free.
  466. *
  467. * If the zone was previously in an "all pages pinned" state then look to
  468. * see if this freeing clears that state.
  469. *
  470. * And clear the zone's pages_scanned counter, to hold off the "all pages are
  471. * pinned" detection logic.
  472. */
  473. static void free_pages_bulk(struct zone *zone, int count,
  474. struct list_head *list, int order)
  475. {
  476. spin_lock(&zone->lock);
  477. zone->all_unreclaimable = 0;
  478. zone->pages_scanned = 0;
  479. while (count--) {
  480. struct page *page;
  481. VM_BUG_ON(list_empty(list));
  482. page = list_entry(list->prev, struct page, lru);
  483. /* have to delete it, as __free_one_page() manipulates the list */
  484. list_del(&page->lru);
  485. __free_one_page(page, zone, order);
  486. }
  487. spin_unlock(&zone->lock);
  488. }
  489. static void free_one_page(struct zone *zone, struct page *page, int order)
  490. {
  491. spin_lock(&zone->lock);
  492. zone->all_unreclaimable = 0;
  493. zone->pages_scanned = 0;
  494. __free_one_page(page, zone, order);
  495. spin_unlock(&zone->lock);
  496. }
  497. static void __free_pages_ok(struct page *page, unsigned int order)
  498. {
  499. unsigned long flags;
  500. int i;
  501. int reserved = 0;
  502. for (i = 0 ; i < (1 << order) ; ++i)
  503. reserved += free_pages_check(page + i);
  504. if (reserved)
  505. return;
  506. if (!PageHighMem(page))
  507. debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
  508. arch_free_page(page, order);
  509. kernel_map_pages(page, 1 << order, 0);
  510. local_irq_save(flags);
  511. __count_vm_events(PGFREE, 1 << order);
  512. free_one_page(page_zone(page), page, order);
  513. local_irq_restore(flags);
  514. }
  515. /*
  516. * permit the bootmem allocator to evade page validation on high-order frees
  517. */
  518. void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
  519. {
  520. if (order == 0) {
  521. __ClearPageReserved(page);
  522. set_page_count(page, 0);
  523. set_page_refcounted(page);
  524. __free_page(page);
  525. } else {
  526. int loop;
  527. prefetchw(page);
  528. for (loop = 0; loop < BITS_PER_LONG; loop++) {
  529. struct page *p = &page[loop];
  530. if (loop + 1 < BITS_PER_LONG)
  531. prefetchw(p + 1);
  532. __ClearPageReserved(p);
  533. set_page_count(p, 0);
  534. }
  535. set_page_refcounted(page);
  536. __free_pages(page, order);
  537. }
  538. }
  539. /*
  540. * The order of subdivision here is critical for the IO subsystem.
  541. * Please do not alter this order without good reasons and regression
  542. * testing. Specifically, as large blocks of memory are subdivided,
  543. * the order in which smaller blocks are delivered depends on the order
  544. * they're subdivided in this function. This is the primary factor
  545. * influencing the order in which pages are delivered to the IO
  546. * subsystem according to empirical testing, and this is also justified
  547. * by considering the behavior of a buddy system containing a single
  548. * large block of memory acted on by a series of small allocations.
  549. * This behavior is a critical factor in sglist merging's success.
  550. *
  551. * -- wli
  552. */
  553. static inline void expand(struct zone *zone, struct page *page,
  554. int low, int high, struct free_area *area,
  555. int migratetype)
  556. {
  557. unsigned long size = 1 << high;
  558. while (high > low) {
  559. area--;
  560. high--;
  561. size >>= 1;
  562. VM_BUG_ON(bad_range(zone, &page[size]));
  563. list_add(&page[size].lru, &area->free_list[migratetype]);
  564. area->nr_free++;
  565. set_page_order(&page[size], high);
  566. }
  567. }
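/*
 * Worked example (illustration only): expand(zone, page, 0, 3, area, mt)
 * on an order-3 block queues page[4] on the order-2 free list (covering
 * pages 4-7), page[2] on the order-1 list (pages 2-3) and page[1] on the
 * order-0 list, leaving page[0] as the order-0 page returned to the caller.
 */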
  568. /*
  569. * This page is about to be returned from the page allocator
  570. */
  571. static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  572. {
  573. if (unlikely(page_mapcount(page) |
  574. (page->mapping != NULL) |
  575. (page_count(page) != 0) |
  576. (page->flags & (
  577. 1 << PG_lru |
  578. 1 << PG_private |
  579. 1 << PG_locked |
  580. 1 << PG_active |
  581. 1 << PG_dirty |
  582. 1 << PG_slab |
  583. 1 << PG_swapcache |
  584. 1 << PG_writeback |
  585. 1 << PG_reserved |
  586. 1 << PG_buddy ))))
  587. bad_page(page);
  588. /*
  589. * For now, we report if PG_reserved was found set, but do not
  590. * clear it, and do not allocate the page: as a safety net.
  591. */
  592. if (PageReserved(page))
  593. return 1;
  594. page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
  595. 1 << PG_referenced | 1 << PG_arch_1 |
  596. 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
  597. set_page_private(page, 0);
  598. set_page_refcounted(page);
  599. arch_alloc_page(page, order);
  600. kernel_map_pages(page, 1 << order, 1);
  601. if (gfp_flags & __GFP_ZERO)
  602. prep_zero_page(page, order, gfp_flags);
  603. if (order && (gfp_flags & __GFP_COMP))
  604. prep_compound_page(page, order);
  605. return 0;
  606. }
  607. #ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
  608. /*
  609. * This array describes the order in which free lists are fallen back on
  610. * when the free lists for the desired migrate type are depleted
  611. */
  612. static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
  613. [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_HIGHATOMIC },
  614. [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_HIGHATOMIC },
  615. [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,MIGRATE_HIGHATOMIC },
  616. [MIGRATE_HIGHATOMIC] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,MIGRATE_MOVABLE},
  617. };
  618. /*
  619. * Move the free pages in a range to the free lists of the requested type.
  620. * Note that start_page and end_page are not aligned on a MAX_ORDER_NR_PAGES
  621. * boundary. If alignment is required, use move_freepages_block()
  622. */
  623. int move_freepages(struct zone *zone,
  624. struct page *start_page, struct page *end_page,
  625. int migratetype)
  626. {
  627. struct page *page;
  628. unsigned long order;
  629. int blocks_moved = 0;
  630. #ifndef CONFIG_HOLES_IN_ZONE
  631. /*
  632. * page_zone is not safe to call in this context when
  633. * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
  634. * anyway as we check zone boundaries in move_freepages_block().
  635. * Remove at a later date when no bug reports exist related to
  636. * CONFIG_PAGE_GROUP_BY_MOBILITY
  637. */
  638. BUG_ON(page_zone(start_page) != page_zone(end_page));
  639. #endif
  640. for (page = start_page; page <= end_page;) {
  641. if (!pfn_valid_within(page_to_pfn(page))) {
  642. page++;
  643. continue;
  644. }
  645. if (!PageBuddy(page)) {
  646. page++;
  647. continue;
  648. }
  649. order = page_order(page);
  650. list_del(&page->lru);
  651. list_add(&page->lru,
  652. &zone->free_area[order].free_list[migratetype]);
  653. page += 1 << order;
  654. blocks_moved++;
  655. }
  656. return blocks_moved;
  657. }
  658. int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
  659. {
  660. unsigned long start_pfn, end_pfn;
  661. struct page *start_page, *end_page;
  662. start_pfn = page_to_pfn(page);
  663. start_pfn = start_pfn & ~(MAX_ORDER_NR_PAGES-1);
  664. start_page = pfn_to_page(start_pfn);
  665. end_page = start_page + MAX_ORDER_NR_PAGES - 1;
  666. end_pfn = start_pfn + MAX_ORDER_NR_PAGES - 1;
  667. /* Do not cross zone boundaries */
  668. if (start_pfn < zone->zone_start_pfn)
  669. start_page = page;
  670. if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
  671. return 0;
  672. return move_freepages(zone, start_page, end_page, migratetype);
  673. }
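/*
 * Worked example (illustration only, assuming the common MAX_ORDER of 11,
 * i.e. MAX_ORDER_NR_PAGES == 1024): for a page at pfn 5000, start_pfn is
 * rounded down to 4096 and end_pfn becomes 5119, so the whole block
 * [4096, 5119] is moved unless it starts before or ends beyond the zone.
 */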
  674. /* Return the page with the lowest PFN in the list */
  675. static struct page *min_page(struct list_head *list)
  676. {
  677. unsigned long min_pfn = -1UL;
  678. struct page *min_page = NULL, *page;
  679. list_for_each_entry(page, list, lru) {
  680. unsigned long pfn = page_to_pfn(page);
  681. if (pfn < min_pfn) {
  682. min_pfn = pfn;
  683. min_page = page;
  684. }
  685. }
  686. return min_page;
  687. }
  688. /* Remove an element from the buddy allocator from the fallback list */
  689. static struct page *__rmqueue_fallback(struct zone *zone, int order,
  690. int start_migratetype)
  691. {
  692. struct free_area * area;
  693. int current_order;
  694. struct page *page;
  695. int migratetype, i;
  696. int nonatomic_fallback_atomic = 0;
  697. retry:
  698. /* Find the largest possible block of pages in the other list */
  699. for (current_order = MAX_ORDER-1; current_order >= order;
  700. --current_order) {
  701. for (i = 0; i < MIGRATE_TYPES - 1; i++) {
  702. migratetype = fallbacks[start_migratetype][i];
  703. /*
  704. * Make it hard to fall back to blocks used for
  705. * high-order atomic allocations
  706. */
  707. if (migratetype == MIGRATE_HIGHATOMIC &&
  708. start_migratetype != MIGRATE_UNMOVABLE &&
  709. !nonatomic_fallback_atomic)
  710. continue;
  711. area = &(zone->free_area[current_order]);
  712. if (list_empty(&area->free_list[migratetype]))
  713. continue;
  714. /* Bias kernel allocations towards low pfns */
  715. page = list_entry(area->free_list[migratetype].next,
  716. struct page, lru);
  717. if (unlikely(start_migratetype != MIGRATE_MOVABLE))
  718. page = min_page(&area->free_list[migratetype]);
  719. area->nr_free--;
  720. /*
  721. * If breaking a large block of pages, move all free
  722. * pages to the preferred allocation list. If falling
  723. * back for a reclaimable kernel allocation, be more
  724. * aggressive about taking ownership of free pages
  725. */
  726. if (unlikely(current_order >= MAX_ORDER / 2) ||
  727. start_migratetype == MIGRATE_RECLAIMABLE) {
  728. unsigned long pages;
  729. pages = move_freepages_block(zone, page,
  730. start_migratetype);
  731. /* Claim the whole block if over half of it is free */
  732. if ((pages << current_order) >= (1 << (MAX_ORDER-2)) &&
  733. migratetype != MIGRATE_HIGHATOMIC)
  734. set_pageblock_migratetype(page,
  735. start_migratetype);
  736. migratetype = start_migratetype;
  737. }
  738. /* Remove the page from the freelists */
  739. list_del(&page->lru);
  740. rmv_page_order(page);
  741. __mod_zone_page_state(zone, NR_FREE_PAGES,
  742. -(1UL << order));
  743. if (current_order == MAX_ORDER - 1)
  744. set_pageblock_migratetype(page,
  745. start_migratetype);
  746. expand(zone, page, order, current_order, area, migratetype);
  747. return page;
  748. }
  749. }
  750. /* Allow fallback to high-order atomic blocks if memory is that low */
  751. if (!nonatomic_fallback_atomic) {
  752. nonatomic_fallback_atomic = 1;
  753. goto retry;
  754. }
  755. return NULL;
  756. }
  757. #else
  758. static struct page *__rmqueue_fallback(struct zone *zone, int order,
  759. int start_migratetype)
  760. {
  761. return NULL;
  762. }
  763. #endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
  764. /*
  765. * Do the hard work of removing an element from the buddy allocator.
  766. * Call me with the zone->lock already held.
  767. */
  768. static struct page *__rmqueue(struct zone *zone, unsigned int order,
  769. int migratetype)
  770. {
  771. struct free_area * area;
  772. unsigned int current_order;
  773. struct page *page;
  774. /* Find a page of the appropriate size in the preferred list */
  775. for (current_order = order; current_order < MAX_ORDER; ++current_order) {
  776. area = &(zone->free_area[current_order]);
  777. if (list_empty(&area->free_list[migratetype]))
  778. continue;
  779. page = list_entry(area->free_list[migratetype].next,
  780. struct page, lru);
  781. list_del(&page->lru);
  782. rmv_page_order(page);
  783. area->nr_free--;
  784. __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
  785. expand(zone, page, order, current_order, area, migratetype);
  786. goto got_page;
  787. }
  788. page = __rmqueue_fallback(zone, order, migratetype);
  789. got_page:
  790. return page;
  791. }
  792. /*
  793. * Obtain a specified number of elements from the buddy allocator, all under
  794. * a single hold of the lock, for efficiency. Add them to the supplied list.
  795. * Returns the number of new pages which were placed at *list.
  796. */
  797. static int rmqueue_bulk(struct zone *zone, unsigned int order,
  798. unsigned long count, struct list_head *list,
  799. int migratetype)
  800. {
  801. int i;
  802. spin_lock(&zone->lock);
  803. for (i = 0; i < count; ++i) {
  804. struct page *page = __rmqueue(zone, order, migratetype);
  805. if (unlikely(page == NULL))
  806. break;
  807. list_add(&page->lru, list);
  808. set_page_private(page, migratetype);
  809. }
  810. spin_unlock(&zone->lock);
  811. return i;
  812. }
  813. #ifdef CONFIG_NUMA
  814. /*
  815. * Called from the vmstat counter updater to drain pagesets of this
  816. * currently executing processor on remote nodes after they have
  817. * expired.
  818. *
  819. * Note that this function must be called with the thread pinned to
  820. * a single processor.
  821. */
  822. void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
  823. {
  824. unsigned long flags;
  825. int to_drain;
  826. local_irq_save(flags);
  827. if (pcp->count >= pcp->batch)
  828. to_drain = pcp->batch;
  829. else
  830. to_drain = pcp->count;
  831. free_pages_bulk(zone, to_drain, &pcp->list, 0);
  832. pcp->count -= to_drain;
  833. local_irq_restore(flags);
  834. }
  835. #endif
  836. static void __drain_pages(unsigned int cpu)
  837. {
  838. unsigned long flags;
  839. struct zone *zone;
  840. int i;
  841. for_each_zone(zone) {
  842. struct per_cpu_pageset *pset;
  843. if (!populated_zone(zone))
  844. continue;
  845. pset = zone_pcp(zone, cpu);
  846. for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
  847. struct per_cpu_pages *pcp;
  848. pcp = &pset->pcp[i];
  849. local_irq_save(flags);
  850. free_pages_bulk(zone, pcp->count, &pcp->list, 0);
  851. pcp->count = 0;
  852. local_irq_restore(flags);
  853. }
  854. }
  855. }
  856. #ifdef CONFIG_HIBERNATION
  857. void mark_free_pages(struct zone *zone)
  858. {
  859. unsigned long pfn, max_zone_pfn;
  860. unsigned long flags;
  861. int order, t;
  862. struct list_head *curr;
  863. if (!zone->spanned_pages)
  864. return;
  865. spin_lock_irqsave(&zone->lock, flags);
  866. max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
  867. for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
  868. if (pfn_valid(pfn)) {
  869. struct page *page = pfn_to_page(pfn);
  870. if (!swsusp_page_is_forbidden(page))
  871. swsusp_unset_page_free(page);
  872. }
  873. for_each_migratetype_order(order, t) {
  874. list_for_each(curr, &zone->free_area[order].free_list[t]) {
  875. unsigned long i;
  876. pfn = page_to_pfn(list_entry(curr, struct page, lru));
  877. for (i = 0; i < (1UL << order); i++)
  878. swsusp_set_page_free(pfn_to_page(pfn + i));
  879. }
  880. }
  881. spin_unlock_irqrestore(&zone->lock, flags);
  882. }
  883. #endif /* CONFIG_HIBERNATION */
  884. #if defined(CONFIG_HIBERNATION) || defined(CONFIG_PAGE_GROUP_BY_MOBILITY)
  885. /*
  886. * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  887. */
  888. void drain_local_pages(void)
  889. {
  890. unsigned long flags;
  891. local_irq_save(flags);
  892. __drain_pages(smp_processor_id());
  893. local_irq_restore(flags);
  894. }
  895. void smp_drain_local_pages(void *arg)
  896. {
  897. drain_local_pages();
  898. }
  899. /*
  900. * Spill all the per-cpu pages from all CPUs back into the buddy allocator
  901. */
  902. void drain_all_local_pages(void)
  903. {
  904. unsigned long flags;
  905. local_irq_save(flags);
  906. __drain_pages(smp_processor_id());
  907. local_irq_restore(flags);
  908. smp_call_function(smp_drain_local_pages, NULL, 0, 1);
  909. }
  910. #else
  911. void drain_all_local_pages(void) {}
  912. #endif /* CONFIG_HIBERNATION || CONFIG_PAGE_GROUP_BY_MOBILITY */
  913. /*
  914. * Free a 0-order page
  915. */
  916. static void fastcall free_hot_cold_page(struct page *page, int cold)
  917. {
  918. struct zone *zone = page_zone(page);
  919. struct per_cpu_pages *pcp;
  920. unsigned long flags;
  921. if (PageAnon(page))
  922. page->mapping = NULL;
  923. if (free_pages_check(page))
  924. return;
  925. if (!PageHighMem(page))
  926. debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
  927. arch_free_page(page, 0);
  928. kernel_map_pages(page, 1, 0);
  929. pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
  930. local_irq_save(flags);
  931. __count_vm_event(PGFREE);
  932. list_add(&page->lru, &pcp->list);
  933. set_page_private(page, get_pageblock_migratetype(page));
  934. pcp->count++;
  935. if (pcp->count >= pcp->high) {
  936. free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
  937. pcp->count -= pcp->batch;
  938. }
  939. local_irq_restore(flags);
  940. put_cpu();
  941. }
  942. void fastcall free_hot_page(struct page *page)
  943. {
  944. free_hot_cold_page(page, 0);
  945. }
  946. void fastcall free_cold_page(struct page *page)
  947. {
  948. free_hot_cold_page(page, 1);
  949. }
  950. /*
  951. * split_page takes a non-compound higher-order page, and splits it into
  952. * n (1<<order) sub-pages: page[0..n-1]
  953. * Each sub-page must be freed individually.
  954. *
  955. * Note: this is probably too low level an operation for use in drivers.
  956. * Please consult with lkml before using this in your driver.
  957. */
  958. void split_page(struct page *page, unsigned int order)
  959. {
  960. int i;
  961. VM_BUG_ON(PageCompound(page));
  962. VM_BUG_ON(!page_count(page));
  963. for (i = 1; i < (1 << order); i++)
  964. set_page_refcounted(page + i);
  965. }
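/*
 * Usage sketch (illustration only, not part of the original file): a caller
 * needing four physically contiguous but individually freeable pages might
 * do roughly:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (page) {
 *		split_page(page, 2);
 *		... hand out page[0..3] ...
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}
 */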
  966. /*
  967. * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
  968. * we cheat by calling it from here, in the order > 0 path. Saves a branch
  969. * or two.
  970. */
  971. static struct page *buffered_rmqueue(struct zonelist *zonelist,
  972. struct zone *zone, int order, gfp_t gfp_flags)
  973. {
  974. unsigned long flags;
  975. struct page *page;
  976. int cold = !!(gfp_flags & __GFP_COLD);
  977. int cpu;
  978. int migratetype = allocflags_to_migratetype(gfp_flags, order);
  979. again:
  980. cpu = get_cpu();
  981. if (likely(order == 0)) {
  982. struct per_cpu_pages *pcp;
  983. pcp = &zone_pcp(zone, cpu)->pcp[cold];
  984. local_irq_save(flags);
  985. if (!pcp->count) {
  986. pcp->count = rmqueue_bulk(zone, 0,
  987. pcp->batch, &pcp->list, migratetype);
  988. if (unlikely(!pcp->count))
  989. goto failed;
  990. }
  991. #ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
  992. /* Find a page of the appropriate migrate type */
  993. list_for_each_entry(page, &pcp->list, lru)
  994. if (page_private(page) == migratetype)
  995. break;
  996. /* Allocate more to the pcp list if necessary */
  997. if (unlikely(&page->lru == &pcp->list)) {
  998. pcp->count += rmqueue_bulk(zone, 0,
  999. pcp->batch, &pcp->list, migratetype);
  1000. page = list_entry(pcp->list.next, struct page, lru);
  1001. }
  1002. #else
  1003. page = list_entry(pcp->list.next, struct page, lru);
  1004. #endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
  1005. list_del(&page->lru);
  1006. pcp->count--;
  1007. } else {
  1008. spin_lock_irqsave(&zone->lock, flags);
  1009. page = __rmqueue(zone, order, migratetype);
  1010. spin_unlock(&zone->lock);
  1011. if (!page)
  1012. goto failed;
  1013. }
  1014. __count_zone_vm_events(PGALLOC, zone, 1 << order);
  1015. zone_statistics(zonelist, zone);
  1016. local_irq_restore(flags);
  1017. put_cpu();
  1018. VM_BUG_ON(bad_range(zone, page));
  1019. if (prep_new_page(page, order, gfp_flags))
  1020. goto again;
  1021. return page;
  1022. failed:
  1023. local_irq_restore(flags);
  1024. put_cpu();
  1025. return NULL;
  1026. }
  1027. #define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
  1028. #define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */
  1029. #define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */
  1030. #define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */
  1031. #define ALLOC_HARDER 0x10 /* try to alloc harder */
  1032. #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
  1033. #define ALLOC_CPUSET 0x40 /* check for correct cpuset */
  1034. #ifdef CONFIG_FAIL_PAGE_ALLOC
  1035. static struct fail_page_alloc_attr {
  1036. struct fault_attr attr;
  1037. u32 ignore_gfp_highmem;
  1038. u32 ignore_gfp_wait;
  1039. u32 min_order;
  1040. #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
  1041. struct dentry *ignore_gfp_highmem_file;
  1042. struct dentry *ignore_gfp_wait_file;
  1043. struct dentry *min_order_file;
  1044. #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
  1045. } fail_page_alloc = {
  1046. .attr = FAULT_ATTR_INITIALIZER,
  1047. .ignore_gfp_wait = 1,
  1048. .ignore_gfp_highmem = 1,
  1049. .min_order = 1,
  1050. };
  1051. static int __init setup_fail_page_alloc(char *str)
  1052. {
  1053. return setup_fault_attr(&fail_page_alloc.attr, str);
  1054. }
  1055. __setup("fail_page_alloc=", setup_fail_page_alloc);
  1056. static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  1057. {
  1058. if (order < fail_page_alloc.min_order)
  1059. return 0;
  1060. if (gfp_mask & __GFP_NOFAIL)
  1061. return 0;
  1062. if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
  1063. return 0;
  1064. if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
  1065. return 0;
  1066. return should_fail(&fail_page_alloc.attr, 1 << order);
  1067. }
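/*
 * Usage sketch (illustration only): fault injection here is driven by the
 * generic fault-attr framework, so it is typically enabled with a boot
 * parameter of the form fail_page_alloc=<interval>,<probability>,<space>,<times>,
 * e.g. fail_page_alloc=1,10,0,-1 to fail roughly 10% of eligible allocations
 * indefinitely; the exact syntax is whatever setup_fault_attr() accepts.
 */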
  1068. #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
  1069. static int __init fail_page_alloc_debugfs(void)
  1070. {
  1071. mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
  1072. struct dentry *dir;
  1073. int err;
  1074. err = init_fault_attr_dentries(&fail_page_alloc.attr,
  1075. "fail_page_alloc");
  1076. if (err)
  1077. return err;
  1078. dir = fail_page_alloc.attr.dentries.dir;
  1079. fail_page_alloc.ignore_gfp_wait_file =
  1080. debugfs_create_bool("ignore-gfp-wait", mode, dir,
  1081. &fail_page_alloc.ignore_gfp_wait);
  1082. fail_page_alloc.ignore_gfp_highmem_file =
  1083. debugfs_create_bool("ignore-gfp-highmem", mode, dir,
  1084. &fail_page_alloc.ignore_gfp_highmem);
  1085. fail_page_alloc.min_order_file =
  1086. debugfs_create_u32("min-order", mode, dir,
  1087. &fail_page_alloc.min_order);
  1088. if (!fail_page_alloc.ignore_gfp_wait_file ||
  1089. !fail_page_alloc.ignore_gfp_highmem_file ||
  1090. !fail_page_alloc.min_order_file) {
  1091. err = -ENOMEM;
  1092. debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
  1093. debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
  1094. debugfs_remove(fail_page_alloc.min_order_file);
  1095. cleanup_fault_attr_dentries(&fail_page_alloc.attr);
  1096. }
  1097. return err;
  1098. }
  1099. late_initcall(fail_page_alloc_debugfs);
  1100. #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
  1101. #else /* CONFIG_FAIL_PAGE_ALLOC */
  1102. static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  1103. {
  1104. return 0;
  1105. }
  1106. #endif /* CONFIG_FAIL_PAGE_ALLOC */
  1107. /*
  1108. * Return 1 if free pages are above 'mark'. This takes into account the order
  1109. * of the allocation.
  1110. */
  1111. int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
  1112. int classzone_idx, int alloc_flags)
  1113. {
1114. /* free_pages may go negative - that's OK */
  1115. long min = mark;
  1116. long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
  1117. int o;
  1118. if (alloc_flags & ALLOC_HIGH)
  1119. min -= min / 2;
  1120. if (alloc_flags & ALLOC_HARDER)
  1121. min -= min / 4;
  1122. if (free_pages <= min + z->lowmem_reserve[classzone_idx])
  1123. return 0;
  1124. for (o = 0; o < order; o++) {
  1125. /* At the next order, this order's pages become unavailable */
  1126. free_pages -= z->free_area[o].nr_free << o;
  1127. /* Require fewer higher order pages to be free */
  1128. min >>= 1;
  1129. if (free_pages <= min)
  1130. return 0;
  1131. }
  1132. return 1;
  1133. }
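/*
 * Worked example (illustrative numbers): for an order-2 request with
 * mark = 128, no ALLOC_HIGH/ALLOC_HARDER and lowmem_reserve = 0, the zone
 * must first have more than roughly 128 free pages in total; then, after
 * discarding the order-0 free pages the remainder must exceed 64, and after
 * also discarding the order-1 pages it must exceed 32, making it plausible
 * that an order-2 block can actually be found.
 */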
  1134. #ifdef CONFIG_NUMA
  1135. /*
  1136. * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
  1137. * skip over zones that are not allowed by the cpuset, or that have
  1138. * been recently (in last second) found to be nearly full. See further
  1139. * comments in mmzone.h. Reduces cache footprint of zonelist scans
1140. * that have to skip over a lot of full or unallowed zones.
  1141. *
  1142. * If the zonelist cache is present in the passed in zonelist, then
  1143. * returns a pointer to the allowed node mask (either the current
1144. * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
  1145. *
  1146. * If the zonelist cache is not available for this zonelist, does
  1147. * nothing and returns NULL.
  1148. *
  1149. * If the fullzones BITMAP in the zonelist cache is stale (more than
  1150. * a second since last zap'd) then we zap it out (clear its bits.)
  1151. *
  1152. * We hold off even calling zlc_setup, until after we've checked the
  1153. * first zone in the zonelist, on the theory that most allocations will
  1154. * be satisfied from that first zone, so best to examine that zone as
  1155. * quickly as we can.
  1156. */
  1157. static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
  1158. {
  1159. struct zonelist_cache *zlc; /* cached zonelist speedup info */
  1160. nodemask_t *allowednodes; /* zonelist_cache approximation */
  1161. zlc = zonelist->zlcache_ptr;
  1162. if (!zlc)
  1163. return NULL;
  1164. if (jiffies - zlc->last_full_zap > 1 * HZ) {
  1165. bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
  1166. zlc->last_full_zap = jiffies;
  1167. }
  1168. allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
  1169. &cpuset_current_mems_allowed :
  1170. &node_states[N_HIGH_MEMORY];
  1171. return allowednodes;
  1172. }
  1173. /*
  1174. * Given 'z' scanning a zonelist, run a couple of quick checks to see
  1175. * if it is worth looking at further for free memory:
  1176. * 1) Check that the zone isn't thought to be full (doesn't have its
  1177. * bit set in the zonelist_cache fullzones BITMAP).
1178. * 2) Check that the zone's node (obtained from the zonelist_cache
  1179. * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
  1180. * Return true (non-zero) if zone is worth looking at further, or
  1181. * else return false (zero) if it is not.
  1182. *
  1183. * This check -ignores- the distinction between various watermarks,
  1184. * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
  1185. * found to be full for any variation of these watermarks, it will
  1186. * be considered full for up to one second by all requests, unless
  1187. * we are so low on memory on all allowed nodes that we are forced
  1188. * into the second scan of the zonelist.
  1189. *
  1190. * In the second scan we ignore this zonelist cache and exactly
1191. * apply the watermarks to all zones, even if it is slower to do so.
  1192. * We are low on memory in the second scan, and should leave no stone
  1193. * unturned looking for a free page.
  1194. */
  1195. static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
  1196. nodemask_t *allowednodes)
  1197. {
  1198. struct zonelist_cache *zlc; /* cached zonelist speedup info */
  1199. int i; /* index of *z in zonelist zones */
  1200. int n; /* node that zone *z is on */
  1201. zlc = zonelist->zlcache_ptr;
  1202. if (!zlc)
  1203. return 1;
  1204. i = z - zonelist->zones;
  1205. n = zlc->z_to_n[i];
  1206. /* This zone is worth trying if it is allowed but not full */
  1207. return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
  1208. }
  1209. /*
  1210. * Given 'z' scanning a zonelist, set the corresponding bit in
  1211. * zlc->fullzones, so that subsequent attempts to allocate a page
  1212. * from that zone don't waste time re-examining it.
  1213. */
  1214. static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
  1215. {
  1216. struct zonelist_cache *zlc; /* cached zonelist speedup info */
  1217. int i; /* index of *z in zonelist zones */
  1218. zlc = zonelist->zlcache_ptr;
  1219. if (!zlc)
  1220. return;
  1221. i = z - zonelist->zones;
  1222. set_bit(i, zlc->fullzones);
  1223. }
  1224. #else /* CONFIG_NUMA */
  1225. static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
  1226. {
  1227. return NULL;
  1228. }
  1229. static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
  1230. nodemask_t *allowednodes)
  1231. {
  1232. return 1;
  1233. }
  1234. static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
  1235. {
  1236. }
  1237. #endif /* CONFIG_NUMA */
  1238. /*
  1239. * get_page_from_freelist goes through the zonelist trying to allocate
  1240. * a page.
  1241. */
  1242. static struct page *
  1243. get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
  1244. struct zonelist *zonelist, int alloc_flags)
  1245. {
  1246. struct zone **z;
  1247. struct page *page = NULL;
  1248. int classzone_idx = zone_idx(zonelist->zones[0]);
  1249. struct zone *zone;
  1250. nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
  1251. int zlc_active = 0; /* set if using zonelist_cache */
  1252. int did_zlc_setup = 0; /* just call zlc_setup() one time */
  1253. enum zone_type highest_zoneidx = -1; /* Gets set for policy zonelists */
  1254. zonelist_scan:
  1255. /*
  1256. * Scan zonelist, looking for a zone with enough free.
  1257. * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
  1258. */
  1259. z = zonelist->zones;
  1260. do {
  1261. /*
  1262. * In NUMA, this could be a policy zonelist which contains
  1263. * zones that may not be allowed by the current gfp_mask.
  1264. * Check the zone is allowed by the current flags
  1265. */
  1266. if (unlikely(alloc_should_filter_zonelist(zonelist))) {
  1267. if (highest_zoneidx == -1)
  1268. highest_zoneidx = gfp_zone(gfp_mask);
  1269. if (zone_idx(*z) > highest_zoneidx)
  1270. continue;
  1271. }
  1272. if (NUMA_BUILD && zlc_active &&
  1273. !zlc_zone_worth_trying(zonelist, z, allowednodes))
  1274. continue;
  1275. zone = *z;
  1276. if ((alloc_flags & ALLOC_CPUSET) &&
  1277. !cpuset_zone_allowed_softwall(zone, gfp_mask))
  1278. goto try_next_zone;
  1279. if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
  1280. unsigned long mark;
  1281. if (alloc_flags & ALLOC_WMARK_MIN)
  1282. mark = zone->pages_min;
  1283. else if (alloc_flags & ALLOC_WMARK_LOW)
  1284. mark = zone->pages_low;
  1285. else
  1286. mark = zone->pages_high;
  1287. if (!zone_watermark_ok(zone, order, mark,
  1288. classzone_idx, alloc_flags)) {
  1289. if (!zone_reclaim_mode ||
  1290. !zone_reclaim(zone, gfp_mask, order))
  1291. goto this_zone_full;
  1292. }
  1293. }
  1294. page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
  1295. if (page)
  1296. break;
  1297. this_zone_full:
  1298. if (NUMA_BUILD)
  1299. zlc_mark_zone_full(zonelist, z);
  1300. try_next_zone:
  1301. if (NUMA_BUILD && !did_zlc_setup) {
  1302. /* we do zlc_setup after the first zone is tried */
  1303. allowednodes = zlc_setup(zonelist, alloc_flags);
  1304. zlc_active = 1;
  1305. did_zlc_setup = 1;
  1306. }
  1307. } while (*(++z) != NULL);
  1308. if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
  1309. /* Disable zlc cache for second zonelist scan */
  1310. zlc_active = 0;
  1311. goto zonelist_scan;
  1312. }
  1313. return page;
  1314. }
  1315. /*
  1316. * This is the 'heart' of the zoned buddy allocator.
  1317. */
  1318. struct page * fastcall
  1319. __alloc_pages(gfp_t gfp_mask, unsigned int order,
  1320. struct zonelist *zonelist)
  1321. {
  1322. const gfp_t wait = gfp_mask & __GFP_WAIT;
  1323. struct zone **z;
  1324. struct page *page;
  1325. struct reclaim_state reclaim_state;
  1326. struct task_struct *p = current;
  1327. int do_retry;
  1328. int alloc_flags;
  1329. int did_some_progress;
  1330. might_sleep_if(wait);
  1331. if (should_fail_alloc_page(gfp_mask, order))
  1332. return NULL;
  1333. restart:
  1334. z = zonelist->zones; /* the list of zones suitable for gfp_mask */
  1335. if (unlikely(*z == NULL)) {
  1336. /*
  1337. * Happens if we have an empty zonelist as a result of
  1338. * GFP_THISNODE being used on a memoryless node
  1339. */
  1340. return NULL;
  1341. }
  1342. page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
  1343. zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
  1344. if (page)
  1345. goto got_pg;
  1346. /*
  1347. * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
  1348. * __GFP_NOWARN set) should not cause reclaim since the subsystem
1349. * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
  1350. * using a larger set of nodes after it has established that the
  1351. * allowed per node queues are empty and that nodes are
  1352. * over allocated.
  1353. */
  1354. if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
  1355. goto nopage;
  1356. for (z = zonelist->zones; *z; z++)
  1357. wakeup_kswapd(*z, order);
  1358. /*
  1359. * OK, we're below the kswapd watermark and have kicked background
  1360. * reclaim. Now things get more complex, so set up alloc_flags according
  1361. * to how we want to proceed.
  1362. *
  1363. * The caller may dip into page reserves a bit more if the caller
  1364. * cannot run direct reclaim, or if the caller has realtime scheduling
  1365. * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
  1366. * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
  1367. */
  1368. alloc_flags = ALLOC_WMARK_MIN;
  1369. if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
  1370. alloc_flags |= ALLOC_HARDER;
  1371. if (gfp_mask & __GFP_HIGH)
  1372. alloc_flags |= ALLOC_HIGH;
  1373. if (wait)
  1374. alloc_flags |= ALLOC_CPUSET;
  1375. /*
  1376. * Go through the zonelist again. Let __GFP_HIGH and allocations
  1377. * coming from realtime tasks go deeper into reserves.
  1378. *
  1379. * This is the last chance, in general, before the goto nopage.
  1380. * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
  1381. * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
  1382. */
  1383. page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
  1384. if (page)
  1385. goto got_pg;
  1386. /* This allocation should allow future memory freeing. */
  1387. rebalance:
  1388. if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
  1389. && !in_interrupt()) {
  1390. if (!(gfp_mask & __GFP_NOMEMALLOC)) {
  1391. nofail_alloc:
  1392. /* go through the zonelist yet again, ignoring mins */
  1393. page = get_page_from_freelist(gfp_mask, order,
  1394. zonelist, ALLOC_NO_WATERMARKS);
  1395. if (page)
  1396. goto got_pg;
  1397. if (gfp_mask & __GFP_NOFAIL) {
  1398. congestion_wait(WRITE, HZ/50);
  1399. goto nofail_alloc;
  1400. }
  1401. }
  1402. goto nopage;
  1403. }
  1404. /* Atomic allocations - we can't balance anything */
  1405. if (!wait)
  1406. goto nopage;
  1407. cond_resched();
  1408. /* We now go into synchronous reclaim */
  1409. cpuset_memory_pressure_bump();
  1410. p->flags |= PF_MEMALLOC;
  1411. reclaim_state.reclaimed_slab = 0;
  1412. p->reclaim_state = &reclaim_state;
  1413. did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
  1414. p->reclaim_state = NULL;
  1415. p->flags &= ~PF_MEMALLOC;
  1416. cond_resched();
  1417. if (order != 0)
  1418. drain_all_local_pages();
  1419. if (likely(did_some_progress)) {
  1420. page = get_page_from_freelist(gfp_mask, order,
  1421. zonelist, alloc_flags);
  1422. if (page)
  1423. goto got_pg;
  1424. } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
  1425. /*
  1426. * Go through the zonelist yet one more time, keep
  1427. * very high watermark here, this is only to catch
  1428. * a parallel oom killing, we must fail if we're still
  1429. * under heavy pressure.
  1430. */
  1431. page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
  1432. zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
  1433. if (page)
  1434. goto got_pg;
  1435. /* The OOM killer will not help higher order allocs so fail */
  1436. if (order > PAGE_ALLOC_COSTLY_ORDER)
  1437. goto nopage;
  1438. out_of_memory(zonelist, gfp_mask, order);
  1439. goto restart;
  1440. }
  1441. /*
  1442. * Don't let big-order allocations loop unless the caller explicitly
  1443. * requests that. Wait for some write requests to complete then retry.
  1444. *
1445. * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
1446. * <= PAGE_ALLOC_COSTLY_ORDER, but that may not be true in other implementations.
  1447. */
  1448. do_retry = 0;
  1449. if (!(gfp_mask & __GFP_NORETRY)) {
  1450. if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
  1451. (gfp_mask & __GFP_REPEAT))
  1452. do_retry = 1;
  1453. if (gfp_mask & __GFP_NOFAIL)
  1454. do_retry = 1;
  1455. }
  1456. if (do_retry) {
  1457. congestion_wait(WRITE, HZ/50);
  1458. goto rebalance;
  1459. }
  1460. nopage:
  1461. if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
  1462. printk(KERN_WARNING "%s: page allocation failure."
  1463. " order:%d, mode:0x%x\n",
  1464. p->comm, order, gfp_mask);
  1465. dump_stack();
  1466. show_mem();
  1467. }
  1468. got_pg:
  1469. return page;
  1470. }
  1471. EXPORT_SYMBOL(__alloc_pages);
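/*
 * Illustrative summary of the fallback order implemented above: try the
 * zonelist at the low watermark, wake kswapd, retry at the min watermark
 * with adjusted alloc_flags, let PF_MEMALLOC/TIF_MEMDIE callers ignore the
 * watermarks entirely, then enter direct reclaim and, for small-order
 * requests that allow it, fall back to the OOM killer before giving up.
 */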
  1472. /*
  1473. * Common helper functions.
  1474. */
  1475. fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
  1476. {
  1477. struct page * page;
  1478. page = alloc_pages(gfp_mask, order);
  1479. if (!page)
  1480. return 0;
  1481. return (unsigned long) page_address(page);
  1482. }
  1483. EXPORT_SYMBOL(__get_free_pages);
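/*
 * Usage sketch (illustrative only): a caller needing four physically
 * contiguous, kernel-mapped pages might do
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 2);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_pages(addr, 2);
 *
 * __GFP_HIGHMEM must not be used here, since the return value is a kernel
 * virtual address rather than a struct page.
 */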
  1484. fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
  1485. {
  1486. struct page * page;
  1487. /*
  1488. * get_zeroed_page() returns a 32-bit address, which cannot represent
  1489. * a highmem page
  1490. */
  1491. VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
  1492. page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
  1493. if (page)
  1494. return (unsigned long) page_address(page);
  1495. return 0;
  1496. }
  1497. EXPORT_SYMBOL(get_zeroed_page);
  1498. void __pagevec_free(struct pagevec *pvec)
  1499. {
  1500. int i = pagevec_count(pvec);
  1501. while (--i >= 0)
  1502. free_hot_cold_page(pvec->pages[i], pvec->cold);
  1503. }
  1504. fastcall void __free_pages(struct page *page, unsigned int order)
  1505. {
  1506. if (put_page_testzero(page)) {
  1507. if (order == 0)
  1508. free_hot_page(page);
  1509. else
  1510. __free_pages_ok(page, order);
  1511. }
  1512. }
  1513. EXPORT_SYMBOL(__free_pages);
  1514. fastcall void free_pages(unsigned long addr, unsigned int order)
  1515. {
  1516. if (addr != 0) {
  1517. VM_BUG_ON(!virt_addr_valid((void *)addr));
  1518. __free_pages(virt_to_page((void *)addr), order);
  1519. }
  1520. }
  1521. EXPORT_SYMBOL(free_pages);
  1522. static unsigned int nr_free_zone_pages(int offset)
  1523. {
  1524. /* Just pick one node, since fallback list is circular */
  1525. pg_data_t *pgdat = NODE_DATA(numa_node_id());
  1526. unsigned int sum = 0;
  1527. struct zonelist *zonelist = pgdat->node_zonelists + offset;
  1528. struct zone **zonep = zonelist->zones;
  1529. struct zone *zone;
  1530. for (zone = *zonep++; zone; zone = *zonep++) {
  1531. unsigned long size = zone->present_pages;
  1532. unsigned long high = zone->pages_high;
  1533. if (size > high)
  1534. sum += size - high;
  1535. }
  1536. return sum;
  1537. }
  1538. /*
  1539. * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
  1540. */
  1541. unsigned int nr_free_buffer_pages(void)
  1542. {
  1543. return nr_free_zone_pages(gfp_zone(GFP_USER));
  1544. }
  1545. EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
  1546. /*
  1547. * Amount of free RAM allocatable within all zones
  1548. */
  1549. unsigned int nr_free_pagecache_pages(void)
  1550. {
  1551. return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
  1552. }
  1553. static inline void show_node(struct zone *zone)
  1554. {
  1555. if (NUMA_BUILD)
  1556. printk("Node %d ", zone_to_nid(zone));
  1557. }
  1558. void si_meminfo(struct sysinfo *val)
  1559. {
  1560. val->totalram = totalram_pages;
  1561. val->sharedram = 0;
  1562. val->freeram = global_page_state(NR_FREE_PAGES);
  1563. val->bufferram = nr_blockdev_pages();
  1564. val->totalhigh = totalhigh_pages;
  1565. val->freehigh = nr_free_highpages();
  1566. val->mem_unit = PAGE_SIZE;
  1567. }
  1568. EXPORT_SYMBOL(si_meminfo);
  1569. #ifdef CONFIG_NUMA
  1570. void si_meminfo_node(struct sysinfo *val, int nid)
  1571. {
  1572. pg_data_t *pgdat = NODE_DATA(nid);
  1573. val->totalram = pgdat->node_present_pages;
  1574. val->freeram = node_page_state(nid, NR_FREE_PAGES);
  1575. #ifdef CONFIG_HIGHMEM
  1576. val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
  1577. val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
  1578. NR_FREE_PAGES);
  1579. #else
  1580. val->totalhigh = 0;
  1581. val->freehigh = 0;
  1582. #endif
  1583. val->mem_unit = PAGE_SIZE;
  1584. }
  1585. #endif
  1586. #define K(x) ((x) << (PAGE_SHIFT-10))
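/* K() converts a page count to kilobytes; e.g. with 4KB pages (PAGE_SHIFT == 12) it is x << 2. */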
  1587. /*
  1588. * Show free area list (used inside shift_scroll-lock stuff)
  1589. * We also calculate the percentage fragmentation. We do this by counting the
  1590. * memory on each free list with the exception of the first item on the list.
  1591. */
  1592. void show_free_areas(void)
  1593. {
  1594. int cpu;
  1595. struct zone *zone;
  1596. for_each_zone(zone) {
  1597. if (!populated_zone(zone))
  1598. continue;
  1599. show_node(zone);
  1600. printk("%s per-cpu:\n", zone->name);
  1601. for_each_online_cpu(cpu) {
  1602. struct per_cpu_pageset *pageset;
  1603. pageset = zone_pcp(zone, cpu);
  1604. printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d "
  1605. "Cold: hi:%5d, btch:%4d usd:%4d\n",
  1606. cpu, pageset->pcp[0].high,
  1607. pageset->pcp[0].batch, pageset->pcp[0].count,
  1608. pageset->pcp[1].high, pageset->pcp[1].batch,
  1609. pageset->pcp[1].count);
  1610. }
  1611. }
  1612. printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
  1613. " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
  1614. global_page_state(NR_ACTIVE),
  1615. global_page_state(NR_INACTIVE),
  1616. global_page_state(NR_FILE_DIRTY),
  1617. global_page_state(NR_WRITEBACK),
  1618. global_page_state(NR_UNSTABLE_NFS),
  1619. global_page_state(NR_FREE_PAGES),
  1620. global_page_state(NR_SLAB_RECLAIMABLE) +
  1621. global_page_state(NR_SLAB_UNRECLAIMABLE),
  1622. global_page_state(NR_FILE_MAPPED),
  1623. global_page_state(NR_PAGETABLE),
  1624. global_page_state(NR_BOUNCE));
  1625. for_each_zone(zone) {
  1626. int i;
  1627. if (!populated_zone(zone))
  1628. continue;
  1629. show_node(zone);
  1630. printk("%s"
  1631. " free:%lukB"
  1632. " min:%lukB"
  1633. " low:%lukB"
  1634. " high:%lukB"
  1635. " active:%lukB"
  1636. " inactive:%lukB"
  1637. " present:%lukB"
  1638. " pages_scanned:%lu"
  1639. " all_unreclaimable? %s"
  1640. "\n",
  1641. zone->name,
  1642. K(zone_page_state(zone, NR_FREE_PAGES)),
  1643. K(zone->pages_min),
  1644. K(zone->pages_low),
  1645. K(zone->pages_high),
  1646. K(zone_page_state(zone, NR_ACTIVE)),
  1647. K(zone_page_state(zone, NR_INACTIVE)),
  1648. K(zone->present_pages),
  1649. zone->pages_scanned,
  1650. (zone->all_unreclaimable ? "yes" : "no")
  1651. );
  1652. printk("lowmem_reserve[]:");
  1653. for (i = 0; i < MAX_NR_ZONES; i++)
  1654. printk(" %lu", zone->lowmem_reserve[i]);
  1655. printk("\n");
  1656. }
  1657. for_each_zone(zone) {
  1658. unsigned long nr[MAX_ORDER], flags, order, total = 0;
  1659. if (!populated_zone(zone))
  1660. continue;
  1661. show_node(zone);
  1662. printk("%s: ", zone->name);
  1663. spin_lock_irqsave(&zone->lock, flags);
  1664. for (order = 0; order < MAX_ORDER; order++) {
  1665. nr[order] = zone->free_area[order].nr_free;
  1666. total += nr[order] << order;
  1667. }
  1668. spin_unlock_irqrestore(&zone->lock, flags);
  1669. for (order = 0; order < MAX_ORDER; order++)
  1670. printk("%lu*%lukB ", nr[order], K(1UL) << order);
  1671. printk("= %lukB\n", K(total));
  1672. }
  1673. show_swap_cache_info();
  1674. }
  1675. /*
  1676. * Builds allocation fallback zone lists.
  1677. *
  1678. * Add all populated zones of a node to the zonelist.
  1679. */
  1680. static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
  1681. int nr_zones, enum zone_type zone_type)
  1682. {
  1683. struct zone *zone;
  1684. BUG_ON(zone_type >= MAX_NR_ZONES);
  1685. zone_type++;
  1686. do {
  1687. zone_type--;
  1688. zone = pgdat->node_zones + zone_type;
  1689. if (populated_zone(zone)) {
  1690. zonelist->zones[nr_zones++] = zone;
  1691. check_highest_zone(zone_type);
  1692. }
  1693. } while (zone_type);
  1694. return nr_zones;
  1695. }
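/*
 * Example (illustrative): called with zone_type == ZONE_NORMAL, the loop
 * above appends the node's Normal zone first and then walks down through
 * DMA32/DMA, so the resulting zonelist always prefers the highest zone
 * that satisfies the request.
 */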
  1696. /*
  1697. * zonelist_order:
  1698. * 0 = automatic detection of better ordering.
  1699. * 1 = order by ([node] distance, -zonetype)
  1700. * 2 = order by (-zonetype, [node] distance)
  1701. *
  1702. * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
  1703. * the same zonelist. So only NUMA can configure this param.
  1704. */
  1705. #define ZONELIST_ORDER_DEFAULT 0
  1706. #define ZONELIST_ORDER_NODE 1
  1707. #define ZONELIST_ORDER_ZONE 2
  1708. /* zonelist order in the kernel.
  1709. * set_zonelist_order() will set this to NODE or ZONE.
  1710. */
  1711. static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
  1712. static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
  1713. #ifdef CONFIG_NUMA
1714. /* The value the user specified; may be changed via the sysctl handler below */
  1715. static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
  1716. /* string for sysctl */
  1717. #define NUMA_ZONELIST_ORDER_LEN 16
  1718. char numa_zonelist_order[16] = "default";
  1719. /*
1720. * interface to configure zonelist ordering.
1721. * command line option "numa_zonelist_order"
1722. * = "[dD]efault" - default, automatic configuration.
1723. * = "[nN]ode" - order by node locality, then by zone within node
1724. * = "[zZ]one" - order by zone, then by locality within zone
  1725. */
  1726. static int __parse_numa_zonelist_order(char *s)
  1727. {
  1728. if (*s == 'd' || *s == 'D') {
  1729. user_zonelist_order = ZONELIST_ORDER_DEFAULT;
  1730. } else if (*s == 'n' || *s == 'N') {
  1731. user_zonelist_order = ZONELIST_ORDER_NODE;
  1732. } else if (*s == 'z' || *s == 'Z') {
  1733. user_zonelist_order = ZONELIST_ORDER_ZONE;
  1734. } else {
  1735. printk(KERN_WARNING
  1736. "Ignoring invalid numa_zonelist_order value: "
  1737. "%s\n", s);
  1738. return -EINVAL;
  1739. }
  1740. return 0;
  1741. }
  1742. static __init int setup_numa_zonelist_order(char *s)
  1743. {
  1744. if (s)
  1745. return __parse_numa_zonelist_order(s);
  1746. return 0;
  1747. }
  1748. early_param("numa_zonelist_order", setup_numa_zonelist_order);
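/*
 * Example (illustrative): booting with "numa_zonelist_order=zone" forces
 * zone order; the same strings can later be written through the sysctl
 * handler below (exposed as /proc/sys/vm/numa_zonelist_order, assuming the
 * usual vm sysctl wiring).
 */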
  1749. /*
  1750. * sysctl handler for numa_zonelist_order
  1751. */
  1752. int numa_zonelist_order_handler(ctl_table *table, int write,
  1753. struct file *file, void __user *buffer, size_t *length,
  1754. loff_t *ppos)
  1755. {
  1756. char saved_string[NUMA_ZONELIST_ORDER_LEN];
  1757. int ret;
  1758. if (write)
  1759. strncpy(saved_string, (char*)table->data,
  1760. NUMA_ZONELIST_ORDER_LEN);
  1761. ret = proc_dostring(table, write, file, buffer, length, ppos);
  1762. if (ret)
  1763. return ret;
  1764. if (write) {
  1765. int oldval = user_zonelist_order;
  1766. if (__parse_numa_zonelist_order((char*)table->data)) {
  1767. /*
  1768. * bogus value. restore saved string
  1769. */
  1770. strncpy((char*)table->data, saved_string,
  1771. NUMA_ZONELIST_ORDER_LEN);
  1772. user_zonelist_order = oldval;
  1773. } else if (oldval != user_zonelist_order)
  1774. build_all_zonelists();
  1775. }
  1776. return 0;
  1777. }
  1778. #define MAX_NODE_LOAD (num_online_nodes())
  1779. static int node_load[MAX_NUMNODES];
  1780. /**
  1781. * find_next_best_node - find the next node that should appear in a given node's fallback list
  1782. * @node: node whose fallback list we're appending
  1783. * @used_node_mask: nodemask_t of already used nodes
  1784. *
  1785. * We use a number of factors to determine which is the next node that should
  1786. * appear on a given node's fallback list. The node should not have appeared
  1787. * already in @node's fallback list, and it should be the next closest node
  1788. * according to the distance array (which contains arbitrary distance values
  1789. * from each node to each node in the system), and should also prefer nodes
  1790. * with no CPUs, since presumably they'll have very little allocation pressure
  1791. * on them otherwise.
  1792. * It returns -1 if no node is found.
  1793. */
  1794. static int find_next_best_node(int node, nodemask_t *used_node_mask)
  1795. {
  1796. int n, val;
  1797. int min_val = INT_MAX;
  1798. int best_node = -1;
  1799. /* Use the local node if we haven't already */
  1800. if (!node_isset(node, *used_node_mask)) {
  1801. node_set(node, *used_node_mask);
  1802. return node;
  1803. }
  1804. for_each_node_state(n, N_HIGH_MEMORY) {
  1805. cpumask_t tmp;
  1806. /* Don't want a node to appear more than once */
  1807. if (node_isset(n, *used_node_mask))
  1808. continue;
  1809. /* Use the distance array to find the distance */
  1810. val = node_distance(node, n);
  1811. /* Penalize nodes under us ("prefer the next node") */
  1812. val += (n < node);
  1813. /* Give preference to headless and unused nodes */
  1814. tmp = node_to_cpumask(n);
  1815. if (!cpus_empty(tmp))
  1816. val += PENALTY_FOR_NODE_WITH_CPUS;
  1817. /* Slight preference for less loaded node */
  1818. val *= (MAX_NODE_LOAD*MAX_NUMNODES);
  1819. val += node_load[n];
  1820. if (val < min_val) {
  1821. min_val = val;
  1822. best_node = n;
  1823. }
  1824. }
  1825. if (best_node >= 0)
  1826. node_set(best_node, *used_node_mask);
  1827. return best_node;
  1828. }
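/*
 * Illustrative note: because the distance-based score is scaled by
 * MAX_NODE_LOAD * MAX_NUMNODES before node_load[] is added, node_load[]
 * only breaks ties between otherwise equally attractive nodes; it never
 * outweighs a real difference in node_distance().
 */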
  1829. /*
  1830. * Build zonelists ordered by node and zones within node.
  1831. * This results in maximum locality--normal zone overflows into local
  1832. * DMA zone, if any--but risks exhausting DMA zone.
  1833. */
  1834. static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
  1835. {
  1836. enum zone_type i;
  1837. int j;
  1838. struct zonelist *zonelist;
  1839. for (i = 0; i < MAX_NR_ZONES; i++) {
  1840. zonelist = pgdat->node_zonelists + i;
  1841. for (j = 0; zonelist->zones[j] != NULL; j++)
  1842. ;
  1843. j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
  1844. zonelist->zones[j] = NULL;
  1845. }
  1846. }
  1847. /*
  1848. * Build gfp_thisnode zonelists
  1849. */
  1850. static void build_thisnode_zonelists(pg_data_t *pgdat)
  1851. {
  1852. enum zone_type i;
  1853. int j;
  1854. struct zonelist *zonelist;
  1855. for (i = 0; i < MAX_NR_ZONES; i++) {
  1856. zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i;
  1857. j = build_zonelists_node(pgdat, zonelist, 0, i);
  1858. zonelist->zones[j] = NULL;
  1859. }
  1860. }
  1861. /*
  1862. * Build zonelists ordered by zone and nodes within zones.
  1863. * This results in conserving DMA zone[s] until all Normal memory is
  1864. * exhausted, but results in overflowing to remote node while memory
  1865. * may still exist in local DMA zone.
  1866. */
  1867. static int node_order[MAX_NUMNODES];
  1868. static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
  1869. {
  1870. enum zone_type i;
  1871. int pos, j, node;
  1872. int zone_type; /* needs to be signed */
  1873. struct zone *z;
  1874. struct zonelist *zonelist;
  1875. for (i = 0; i < MAX_NR_ZONES; i++) {
  1876. zonelist = pgdat->node_zonelists + i;
  1877. pos = 0;
  1878. for (zone_type = i; zone_type >= 0; zone_type--) {
  1879. for (j = 0; j < nr_nodes; j++) {
  1880. node = node_order[j];
  1881. z = &NODE_DATA(node)->node_zones[zone_type];
  1882. if (populated_zone(z)) {
  1883. zonelist->zones[pos++] = z;
  1884. check_highest_zone(zone_type);
  1885. }
  1886. }
  1887. }
  1888. zonelist->zones[pos] = NULL;
  1889. }
  1890. }
  1891. static int default_zonelist_order(void)
  1892. {
  1893. int nid, zone_type;
1894. unsigned long low_kmem_size, total_size;
  1895. struct zone *z;
  1896. int average_size;
  1897. /*
1898. * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
1899. * If they are really small and used heavily, the system can fall
1900. * into OOM very easily.
1901. * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
  1902. */
1903. /* Is there ZONE_NORMAL? (e.g. ppc has only a DMA zone..) */
  1904. low_kmem_size = 0;
  1905. total_size = 0;
  1906. for_each_online_node(nid) {
  1907. for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
  1908. z = &NODE_DATA(nid)->node_zones[zone_type];
  1909. if (populated_zone(z)) {
  1910. if (zone_type < ZONE_NORMAL)
  1911. low_kmem_size += z->present_pages;
  1912. total_size += z->present_pages;
  1913. }
  1914. }
  1915. }
1916. if (!low_kmem_size || /* there is no DMA area. */
  1917. low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
  1918. return ZONELIST_ORDER_NODE;
  1919. /*
  1920. * look into each node's config.
1921. * If there is a node whose DMA/DMA32 memory makes up a large share of
1922. * its local memory, NODE_ORDER may be suitable.
  1923. */
  1924. average_size = total_size /
  1925. (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
  1926. for_each_online_node(nid) {
  1927. low_kmem_size = 0;
  1928. total_size = 0;
  1929. for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
  1930. z = &NODE_DATA(nid)->node_zones[zone_type];
  1931. if (populated_zone(z)) {
  1932. if (zone_type < ZONE_NORMAL)
  1933. low_kmem_size += z->present_pages;
  1934. total_size += z->present_pages;
  1935. }
  1936. }
  1937. if (low_kmem_size &&
  1938. total_size > average_size && /* ignore small node */
  1939. low_kmem_size > total_size * 70/100)
  1940. return ZONELIST_ORDER_NODE;
  1941. }
  1942. return ZONELIST_ORDER_ZONE;
  1943. }
  1944. static void set_zonelist_order(void)
  1945. {
  1946. if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
  1947. current_zonelist_order = default_zonelist_order();
  1948. else
  1949. current_zonelist_order = user_zonelist_order;
  1950. }
  1951. static void build_zonelists(pg_data_t *pgdat)
  1952. {
  1953. int j, node, load;
  1954. enum zone_type i;
  1955. nodemask_t used_mask;
  1956. int local_node, prev_node;
  1957. struct zonelist *zonelist;
  1958. int order = current_zonelist_order;
  1959. /* initialize zonelists */
  1960. for (i = 0; i < MAX_ZONELISTS; i++) {
  1961. zonelist = pgdat->node_zonelists + i;
  1962. zonelist->zones[0] = NULL;
  1963. }
  1964. /* NUMA-aware ordering of nodes */
  1965. local_node = pgdat->node_id;
  1966. load = num_online_nodes();
  1967. prev_node = local_node;
  1968. nodes_clear(used_mask);
  1969. memset(node_load, 0, sizeof(node_load));
  1970. memset(node_order, 0, sizeof(node_order));
  1971. j = 0;
  1972. while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
  1973. int distance = node_distance(local_node, node);
  1974. /*
  1975. * If another node is sufficiently far away then it is better
  1976. * to reclaim pages in a zone before going off node.
  1977. */
  1978. if (distance > RECLAIM_DISTANCE)
  1979. zone_reclaim_mode = 1;
  1980. /*
  1981. * We don't want to pressure a particular node.
  1982. * So adding penalty to the first node in same
  1983. * distance group to make it round-robin.
  1984. */
  1985. if (distance != node_distance(local_node, prev_node))
  1986. node_load[node] = load;
  1987. prev_node = node;
  1988. load--;
  1989. if (order == ZONELIST_ORDER_NODE)
  1990. build_zonelists_in_node_order(pgdat, node);
  1991. else
  1992. node_order[j++] = node; /* remember order */
  1993. }
  1994. if (order == ZONELIST_ORDER_ZONE) {
  1995. /* calculate node order -- i.e., DMA last! */
  1996. build_zonelists_in_zone_order(pgdat, j);
  1997. }
  1998. build_thisnode_zonelists(pgdat);
  1999. }
  2000. /* Construct the zonelist performance cache - see further mmzone.h */
  2001. static void build_zonelist_cache(pg_data_t *pgdat)
  2002. {
  2003. int i;
  2004. for (i = 0; i < MAX_NR_ZONES; i++) {
  2005. struct zonelist *zonelist;
  2006. struct zonelist_cache *zlc;
  2007. struct zone **z;
  2008. zonelist = pgdat->node_zonelists + i;
  2009. zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
  2010. bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
  2011. for (z = zonelist->zones; *z; z++)
  2012. zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
  2013. }
  2014. }
  2015. #else /* CONFIG_NUMA */
  2016. static void set_zonelist_order(void)
  2017. {
  2018. current_zonelist_order = ZONELIST_ORDER_ZONE;
  2019. }
  2020. static void build_zonelists(pg_data_t *pgdat)
  2021. {
  2022. int node, local_node;
  2023. enum zone_type i,j;
  2024. local_node = pgdat->node_id;
  2025. for (i = 0; i < MAX_NR_ZONES; i++) {
  2026. struct zonelist *zonelist;
  2027. zonelist = pgdat->node_zonelists + i;
  2028. j = build_zonelists_node(pgdat, zonelist, 0, i);
  2029. /*
  2030. * Now we build the zonelist so that it contains the zones
  2031. * of all the other nodes.
  2032. * We don't want to pressure a particular node, so when
  2033. * building the zones for node N, we make sure that the
  2034. * zones coming right after the local ones are those from
  2035. * node N+1 (modulo N)
  2036. */
  2037. for (node = local_node + 1; node < MAX_NUMNODES; node++) {
  2038. if (!node_online(node))
  2039. continue;
  2040. j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
  2041. }
  2042. for (node = 0; node < local_node; node++) {
  2043. if (!node_online(node))
  2044. continue;
  2045. j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
  2046. }
  2047. zonelist->zones[j] = NULL;
  2048. }
  2049. }
  2050. /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
  2051. static void build_zonelist_cache(pg_data_t *pgdat)
  2052. {
  2053. int i;
  2054. for (i = 0; i < MAX_NR_ZONES; i++)
  2055. pgdat->node_zonelists[i].zlcache_ptr = NULL;
  2056. }
  2057. #endif /* CONFIG_NUMA */
2058. /* returns an int only so it can be passed to stop_machine_run() */
  2059. static int __build_all_zonelists(void *dummy)
  2060. {
  2061. int nid;
  2062. for_each_online_node(nid) {
  2063. pg_data_t *pgdat = NODE_DATA(nid);
  2064. build_zonelists(pgdat);
  2065. build_zonelist_cache(pgdat);
  2066. }
  2067. return 0;
  2068. }
  2069. void build_all_zonelists(void)
  2070. {
  2071. set_zonelist_order();
  2072. if (system_state == SYSTEM_BOOTING) {
  2073. __build_all_zonelists(NULL);
  2074. cpuset_init_current_mems_allowed();
  2075. } else {
2076. /* we have to stop all cpus to guarantee there is no user
2077. of zonelist */
  2078. stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
  2079. /* cpuset refresh routine should be here */
  2080. }
  2081. vm_total_pages = nr_free_pagecache_pages();
  2082. /*
  2083. * Disable grouping by mobility if the number of pages in the
  2084. * system is too low to allow the mechanism to work. It would be
  2085. * more accurate, but expensive to check per-zone. This check is
  2086. * made on memory-hotadd so a system can start with mobility
  2087. * disabled and enable it later
  2088. */
  2089. if (vm_total_pages < (MAX_ORDER_NR_PAGES * MIGRATE_TYPES))
  2090. page_group_by_mobility_disabled = 1;
  2091. else
  2092. page_group_by_mobility_disabled = 0;
  2093. printk("Built %i zonelists in %s order, mobility grouping %s. "
  2094. "Total pages: %ld\n",
  2095. num_online_nodes(),
  2096. zonelist_order_name[current_zonelist_order],
  2097. page_group_by_mobility_disabled ? "off" : "on",
  2098. vm_total_pages);
  2099. #ifdef CONFIG_NUMA
  2100. printk("Policy zone: %s\n", zone_names[policy_zone]);
  2101. #endif
  2102. }
  2103. /*
  2104. * Helper functions to size the waitqueue hash table.
  2105. * Essentially these want to choose hash table sizes sufficiently
  2106. * large so that collisions trying to wait on pages are rare.
  2107. * But in fact, the number of active page waitqueues on typical
  2108. * systems is ridiculously low, less than 200. So this is even
  2109. * conservative, even though it seems large.
  2110. *
  2111. * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
  2112. * waitqueues, i.e. the size of the waitq table given the number of pages.
  2113. */
  2114. #define PAGES_PER_WAITQUEUE 256
  2115. #ifndef CONFIG_MEMORY_HOTPLUG
  2116. static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
  2117. {
  2118. unsigned long size = 1;
  2119. pages /= PAGES_PER_WAITQUEUE;
  2120. while (size < pages)
  2121. size <<= 1;
  2122. /*
  2123. * Once we have dozens or even hundreds of threads sleeping
  2124. * on IO we've got bigger problems than wait queue collision.
  2125. * Limit the size of the wait table to a reasonable size.
  2126. */
  2127. size = min(size, 4096UL);
  2128. return max(size, 4UL);
  2129. }
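/*
 * Worked example (illustrative): a zone of 1,048,576 pages (4GB with 4KB
 * pages) gives 1,048,576 / 256 = 4096, so the table is sized at its
 * 4096-entry ceiling; a 65,536-page zone would get 256 entries.
 */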
  2130. #else
  2131. /*
  2132. * A zone's size might be changed by hot-add, so it is not possible to determine
  2133. * a suitable size for its wait_table. So we use the maximum size now.
  2134. *
  2135. * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
  2136. *
  2137. * i386 (preemption config) : 4096 x 16 = 64Kbyte.
  2138. * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
  2139. * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
  2140. *
  2141. * The maximum entries are prepared when a zone's memory is (512K + 256) pages
  2142. * or more by the traditional way. (See above). It equals:
  2143. *
  2144. * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
  2145. * ia64(16K page size) : = ( 8G + 4M)byte.
  2146. * powerpc (64K page size) : = (32G +16M)byte.
  2147. */
  2148. static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
  2149. {
  2150. return 4096UL;
  2151. }
  2152. #endif
  2153. /*
  2154. * This is an integer logarithm so that shifts can be used later
  2155. * to extract the more random high bits from the multiplicative
  2156. * hash function before the remainder is taken.
  2157. */
  2158. static inline unsigned long wait_table_bits(unsigned long size)
  2159. {
  2160. return ffz(~size);
  2161. }
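/*
 * Example (illustrative): for size == 4096 only bits 0..11 of ~size are set,
 * so ffz(~size) == 12, i.e. this returns log2 of the power-of-two table size.
 */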
  2162. #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
  2163. /*
  2164. * Initially all pages are reserved - free ones are freed
  2165. * up by free_all_bootmem() once the early boot process is
  2166. * done. Non-atomic initialization, single-pass.
  2167. */
  2168. void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
  2169. unsigned long start_pfn, enum memmap_context context)
  2170. {
  2171. struct page *page;
  2172. unsigned long end_pfn = start_pfn + size;
  2173. unsigned long pfn;
  2174. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  2175. /*
  2176. * There can be holes in boot-time mem_map[]s
  2177. * handed to this function. They do not
  2178. * exist on hotplugged memory.
  2179. */
  2180. if (context == MEMMAP_EARLY) {
  2181. if (!early_pfn_valid(pfn))
  2182. continue;
  2183. if (!early_pfn_in_nid(pfn, nid))
  2184. continue;
  2185. }
  2186. page = pfn_to_page(pfn);
  2187. set_page_links(page, zone, nid, pfn);
  2188. init_page_count(page);
  2189. reset_page_mapcount(page);
  2190. SetPageReserved(page);
  2191. /*
  2192. * Mark the block movable so that blocks are reserved for
  2193. * movable at startup. This will force kernel allocations
  2194. * to reserve their blocks rather than leaking throughout
  2195. * the address space during boot when many long-lived
  2196. * kernel allocations are made
  2197. */
  2198. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  2199. INIT_LIST_HEAD(&page->lru);
  2200. #ifdef WANT_PAGE_VIRTUAL
  2201. /* The shift won't overflow because ZONE_NORMAL is below 4G. */
  2202. if (!is_highmem_idx(zone))
  2203. set_page_address(page, __va(pfn << PAGE_SHIFT));
  2204. #endif
  2205. }
  2206. }
  2207. static void __meminit zone_init_free_lists(struct pglist_data *pgdat,
  2208. struct zone *zone, unsigned long size)
  2209. {
  2210. int order, t;
  2211. for_each_migratetype_order(order, t) {
  2212. INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
  2213. zone->free_area[order].nr_free = 0;
  2214. }
  2215. }
  2216. #ifndef __HAVE_ARCH_MEMMAP_INIT
  2217. #define memmap_init(size, nid, zone, start_pfn) \
  2218. memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
  2219. #endif
  2220. static int __devinit zone_batchsize(struct zone *zone)
  2221. {
  2222. int batch;
  2223. /*
2224. * The per-cpu-pages pools are set to around 1/1000th of the
2225. * size of the zone, but no more than half a megabyte.
  2226. *
  2227. * OK, so we don't know how big the cache is. So guess.
  2228. */
  2229. batch = zone->present_pages / 1024;
  2230. if (batch * PAGE_SIZE > 512 * 1024)
  2231. batch = (512 * 1024) / PAGE_SIZE;
  2232. batch /= 4; /* We effectively *= 4 below */
  2233. if (batch < 1)
  2234. batch = 1;
  2235. /*
  2236. * Clamp the batch to a 2^n - 1 value. Having a power
  2237. * of 2 value was found to be more likely to have
  2238. * suboptimal cache aliasing properties in some cases.
  2239. *
  2240. * For example if 2 tasks are alternately allocating
  2241. * batches of pages, one task can end up with a lot
  2242. * of pages of one half of the possible page colors
  2243. * and the other with pages of the other colors.
  2244. */
  2245. batch = (1 << (fls(batch + batch/2)-1)) - 1;
  2246. return batch;
  2247. }
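/*
 * Worked example (illustrative): a 1GB zone with 4KB pages has 262144
 * present pages, so batch starts at 256, is clamped to 128 by the 512KB
 * limit, quartered to 32, and finally rounded to the 2^n - 1 value 31.
 */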
  2248. inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
  2249. {
  2250. struct per_cpu_pages *pcp;
  2251. memset(p, 0, sizeof(*p));
  2252. pcp = &p->pcp[0]; /* hot */
  2253. pcp->count = 0;
  2254. pcp->high = 6 * batch;
  2255. pcp->batch = max(1UL, 1 * batch);
  2256. INIT_LIST_HEAD(&pcp->list);
2257. pcp = &p->pcp[1]; /* cold */
  2258. pcp->count = 0;
  2259. pcp->high = 2 * batch;
  2260. pcp->batch = max(1UL, batch/2);
  2261. INIT_LIST_HEAD(&pcp->list);
  2262. }
  2263. /*
  2264. * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
  2265. * to the value high for the pageset p.
  2266. */
  2267. static void setup_pagelist_highmark(struct per_cpu_pageset *p,
  2268. unsigned long high)
  2269. {
  2270. struct per_cpu_pages *pcp;
  2271. pcp = &p->pcp[0]; /* hot list */
  2272. pcp->high = high;
  2273. pcp->batch = max(1UL, high/4);
  2274. if ((high/4) > (PAGE_SHIFT * 8))
  2275. pcp->batch = PAGE_SHIFT * 8;
  2276. }
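/*
 * Illustrative note: this path is used when the percpu_pagelist_fraction
 * sysctl is non-zero (see process_zones() below); e.g. a fraction of 8
 * gives each CPU's hot list a high mark of present_pages/8 with a batch of
 * a quarter of that, capped at PAGE_SHIFT * 8 pages.
 */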
  2277. #ifdef CONFIG_NUMA
  2278. /*
  2279. * Boot pageset table. One per cpu which is going to be used for all
  2280. * zones and all nodes. The parameters will be set in such a way
  2281. * that an item put on a list will immediately be handed over to
  2282. * the buddy list. This is safe since pageset manipulation is done
  2283. * with interrupts disabled.
  2284. *
  2285. * Some NUMA counter updates may also be caught by the boot pagesets.
  2286. *
  2287. * The boot_pagesets must be kept even after bootup is complete for
  2288. * unused processors and/or zones. They do play a role for bootstrapping
  2289. * hotplugged processors.
  2290. *
  2291. * zoneinfo_show() and maybe other functions do
  2292. * not check if the processor is online before following the pageset pointer.
  2293. * Other parts of the kernel may not check if the zone is available.
  2294. */
  2295. static struct per_cpu_pageset boot_pageset[NR_CPUS];
  2296. /*
  2297. * Dynamically allocate memory for the
  2298. * per cpu pageset array in struct zone.
  2299. */
  2300. static int __cpuinit process_zones(int cpu)
  2301. {
  2302. struct zone *zone, *dzone;
  2303. int node = cpu_to_node(cpu);
  2304. node_set_state(node, N_CPU); /* this node has a cpu */
  2305. for_each_zone(zone) {
  2306. if (!populated_zone(zone))
  2307. continue;
  2308. zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
  2309. GFP_KERNEL, node);
  2310. if (!zone_pcp(zone, cpu))
  2311. goto bad;
  2312. setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
  2313. if (percpu_pagelist_fraction)
  2314. setup_pagelist_highmark(zone_pcp(zone, cpu),
  2315. (zone->present_pages / percpu_pagelist_fraction));
  2316. }
  2317. return 0;
  2318. bad:
  2319. for_each_zone(dzone) {
  2320. if (!populated_zone(dzone))
  2321. continue;
  2322. if (dzone == zone)
  2323. break;
  2324. kfree(zone_pcp(dzone, cpu));
  2325. zone_pcp(dzone, cpu) = NULL;
  2326. }
  2327. return -ENOMEM;
  2328. }
  2329. static inline void free_zone_pagesets(int cpu)
  2330. {
  2331. struct zone *zone;
  2332. for_each_zone(zone) {
  2333. struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
  2334. /* Free per_cpu_pageset if it is slab allocated */
  2335. if (pset != &boot_pageset[cpu])
  2336. kfree(pset);
  2337. zone_pcp(zone, cpu) = NULL;
  2338. }
  2339. }
  2340. static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
  2341. unsigned long action,
  2342. void *hcpu)
  2343. {
  2344. int cpu = (long)hcpu;
  2345. int ret = NOTIFY_OK;
  2346. switch (action) {
  2347. case CPU_UP_PREPARE:
  2348. case CPU_UP_PREPARE_FROZEN:
  2349. if (process_zones(cpu))
  2350. ret = NOTIFY_BAD;
  2351. break;
  2352. case CPU_UP_CANCELED:
  2353. case CPU_UP_CANCELED_FROZEN:
  2354. case CPU_DEAD:
  2355. case CPU_DEAD_FROZEN:
  2356. free_zone_pagesets(cpu);
  2357. break;
  2358. default:
  2359. break;
  2360. }
  2361. return ret;
  2362. }
  2363. static struct notifier_block __cpuinitdata pageset_notifier =
  2364. { &pageset_cpuup_callback, NULL, 0 };
  2365. void __init setup_per_cpu_pageset(void)
  2366. {
  2367. int err;
  2368. /* Initialize per_cpu_pageset for cpu 0.
  2369. * A cpuup callback will do this for every cpu
  2370. * as it comes online
  2371. */
  2372. err = process_zones(smp_processor_id());
  2373. BUG_ON(err);
  2374. register_cpu_notifier(&pageset_notifier);
  2375. }
  2376. #endif
  2377. static noinline __init_refok
  2378. int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
  2379. {
  2380. int i;
  2381. struct pglist_data *pgdat = zone->zone_pgdat;
  2382. size_t alloc_size;
  2383. /*
  2384. * The per-page waitqueue mechanism uses hashed waitqueues
  2385. * per zone.
  2386. */
  2387. zone->wait_table_hash_nr_entries =
  2388. wait_table_hash_nr_entries(zone_size_pages);
  2389. zone->wait_table_bits =
  2390. wait_table_bits(zone->wait_table_hash_nr_entries);
  2391. alloc_size = zone->wait_table_hash_nr_entries
  2392. * sizeof(wait_queue_head_t);
  2393. if (system_state == SYSTEM_BOOTING) {
  2394. zone->wait_table = (wait_queue_head_t *)
  2395. alloc_bootmem_node(pgdat, alloc_size);
  2396. } else {
  2397. /*
  2398. * This case means that a zone whose size was 0 gets new memory
  2399. * via memory hot-add.
  2400. * But it may be the case that a new node was hot-added. In
  2401. * this case vmalloc() will not be able to use this new node's
  2402. * memory - this wait_table must be initialized to use this new
  2403. * node itself as well.
  2404. * To use this new node's memory, further consideration will be
  2405. * necessary.
  2406. */
  2407. zone->wait_table = vmalloc(alloc_size);
  2408. }
  2409. if (!zone->wait_table)
  2410. return -ENOMEM;
2411. for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
  2412. init_waitqueue_head(zone->wait_table + i);
  2413. return 0;
  2414. }
  2415. static __meminit void zone_pcp_init(struct zone *zone)
  2416. {
  2417. int cpu;
  2418. unsigned long batch = zone_batchsize(zone);
  2419. for (cpu = 0; cpu < NR_CPUS; cpu++) {
  2420. #ifdef CONFIG_NUMA
  2421. /* Early boot. Slab allocator not functional yet */
  2422. zone_pcp(zone, cpu) = &boot_pageset[cpu];
  2423. setup_pageset(&boot_pageset[cpu],0);
  2424. #else
  2425. setup_pageset(zone_pcp(zone,cpu), batch);
  2426. #endif
  2427. }
  2428. if (zone->present_pages)
  2429. printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
  2430. zone->name, zone->present_pages, batch);
  2431. }
  2432. __meminit int init_currently_empty_zone(struct zone *zone,
  2433. unsigned long zone_start_pfn,
  2434. unsigned long size,
  2435. enum memmap_context context)
  2436. {
  2437. struct pglist_data *pgdat = zone->zone_pgdat;
  2438. int ret;
  2439. ret = zone_wait_table_init(zone, size);
  2440. if (ret)
  2441. return ret;
  2442. pgdat->nr_zones = zone_idx(zone) + 1;
  2443. zone->zone_start_pfn = zone_start_pfn;
  2444. memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
  2445. zone_init_free_lists(pgdat, zone, zone->spanned_pages);
  2446. return 0;
  2447. }
  2448. #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  2449. /*
  2450. * Basic iterator support. Return the first range of PFNs for a node
  2451. * Note: nid == MAX_NUMNODES returns first region regardless of node
  2452. */
  2453. static int __meminit first_active_region_index_in_nid(int nid)
  2454. {
  2455. int i;
  2456. for (i = 0; i < nr_nodemap_entries; i++)
  2457. if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
  2458. return i;
  2459. return -1;
  2460. }
  2461. /*
  2462. * Basic iterator support. Return the next active range of PFNs for a node
2463. * Note: nid == MAX_NUMNODES returns next region regardless of node
  2464. */
  2465. static int __meminit next_active_region_index_in_nid(int index, int nid)
  2466. {
  2467. for (index = index + 1; index < nr_nodemap_entries; index++)
  2468. if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
  2469. return index;
  2470. return -1;
  2471. }
  2472. #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
  2473. /*
  2474. * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
  2475. * Architectures may implement their own version but if add_active_range()
  2476. * was used and there are no special requirements, this is a convenient
  2477. * alternative
  2478. */
  2479. int __meminit early_pfn_to_nid(unsigned long pfn)
  2480. {
  2481. int i;
  2482. for (i = 0; i < nr_nodemap_entries; i++) {
  2483. unsigned long start_pfn = early_node_map[i].start_pfn;
  2484. unsigned long end_pfn = early_node_map[i].end_pfn;
  2485. if (start_pfn <= pfn && pfn < end_pfn)
  2486. return early_node_map[i].nid;
  2487. }
  2488. return 0;
  2489. }
  2490. #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
  2491. /* Basic iterator support to walk early_node_map[] */
  2492. #define for_each_active_range_index_in_nid(i, nid) \
  2493. for (i = first_active_region_index_in_nid(nid); i != -1; \
  2494. i = next_active_region_index_in_nid(i, nid))
  2495. /**
  2496. * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
  2497. * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
  2498. * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
  2499. *
  2500. * If an architecture guarantees that all ranges registered with
2501. * add_active_ranges() contain no holes and may be freed, this
2502. * function may be used instead of calling free_bootmem() manually.
  2503. */
  2504. void __init free_bootmem_with_active_regions(int nid,
  2505. unsigned long max_low_pfn)
  2506. {
  2507. int i;
  2508. for_each_active_range_index_in_nid(i, nid) {
  2509. unsigned long size_pages = 0;
  2510. unsigned long end_pfn = early_node_map[i].end_pfn;
  2511. if (early_node_map[i].start_pfn >= max_low_pfn)
  2512. continue;
  2513. if (end_pfn > max_low_pfn)
  2514. end_pfn = max_low_pfn;
  2515. size_pages = end_pfn - early_node_map[i].start_pfn;
  2516. free_bootmem_node(NODE_DATA(early_node_map[i].nid),
  2517. PFN_PHYS(early_node_map[i].start_pfn),
  2518. size_pages << PAGE_SHIFT);
  2519. }
  2520. }
  2521. /**
  2522. * sparse_memory_present_with_active_regions - Call memory_present for each active range
  2523. * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
  2524. *
  2525. * If an architecture guarantees that all ranges registered with
  2526. * add_active_ranges() contain no holes and may be freed, this
  2527. * function may be used instead of calling memory_present() manually.
  2528. */
  2529. void __init sparse_memory_present_with_active_regions(int nid)
  2530. {
  2531. int i;
  2532. for_each_active_range_index_in_nid(i, nid)
  2533. memory_present(early_node_map[i].nid,
  2534. early_node_map[i].start_pfn,
  2535. early_node_map[i].end_pfn);
  2536. }
  2537. /**
  2538. * push_node_boundaries - Push node boundaries to at least the requested boundary
  2539. * @nid: The nid of the node to push the boundary for
  2540. * @start_pfn: The start pfn of the node
  2541. * @end_pfn: The end pfn of the node
  2542. *
  2543. * In reserve-based hot-add, mem_map is allocated that is unused until hotadd
  2544. * time. Specifically, on x86_64, SRAT will report ranges that can potentially
  2545. * be hotplugged even though no physical memory exists. This function allows
  2546. * an arch to push out the node boundaries so mem_map is allocated that can
  2547. * be used later.
  2548. */
  2549. #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  2550. void __init push_node_boundaries(unsigned int nid,
  2551. unsigned long start_pfn, unsigned long end_pfn)
  2552. {
  2553. printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
  2554. nid, start_pfn, end_pfn);
  2555. /* Initialise the boundary for this node if necessary */
  2556. if (node_boundary_end_pfn[nid] == 0)
  2557. node_boundary_start_pfn[nid] = -1UL;
  2558. /* Update the boundaries */
  2559. if (node_boundary_start_pfn[nid] > start_pfn)
  2560. node_boundary_start_pfn[nid] = start_pfn;
  2561. if (node_boundary_end_pfn[nid] < end_pfn)
  2562. node_boundary_end_pfn[nid] = end_pfn;
  2563. }
  2564. /* If necessary, push the node boundary out for reserve hotadd */
  2565. static void __meminit account_node_boundary(unsigned int nid,
  2566. unsigned long *start_pfn, unsigned long *end_pfn)
  2567. {
  2568. printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
  2569. nid, *start_pfn, *end_pfn);
  2570. /* Return if boundary information has not been provided */
  2571. if (node_boundary_end_pfn[nid] == 0)
  2572. return;
  2573. /* Check the boundaries and update if necessary */
  2574. if (node_boundary_start_pfn[nid] < *start_pfn)
  2575. *start_pfn = node_boundary_start_pfn[nid];
  2576. if (node_boundary_end_pfn[nid] > *end_pfn)
  2577. *end_pfn = node_boundary_end_pfn[nid];
  2578. }
  2579. #else
  2580. void __init push_node_boundaries(unsigned int nid,
  2581. unsigned long start_pfn, unsigned long end_pfn) {}
  2582. static void __meminit account_node_boundary(unsigned int nid,
  2583. unsigned long *start_pfn, unsigned long *end_pfn) {}
  2584. #endif
  2585. /**
  2586. * get_pfn_range_for_nid - Return the start and end page frames for a node
  2587. * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
  2588. * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
  2589. * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
  2590. *
  2591. * It returns the start and end page frame of a node based on information
  2592. * provided by an arch calling add_active_range(). If called for a node
  2593. * with no available memory, a warning is printed and the start and end
  2594. * PFNs will be 0.
  2595. */
  2596. void __meminit get_pfn_range_for_nid(unsigned int nid,
  2597. unsigned long *start_pfn, unsigned long *end_pfn)
  2598. {
  2599. int i;
  2600. *start_pfn = -1UL;
  2601. *end_pfn = 0;
  2602. for_each_active_range_index_in_nid(i, nid) {
  2603. *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
  2604. *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
  2605. }
  2606. if (*start_pfn == -1UL)
  2607. *start_pfn = 0;
  2608. /* Push the node boundaries out if requested */
  2609. account_node_boundary(nid, start_pfn, end_pfn);
  2610. }
  2611. /*
  2612. * This finds a zone that can be used for ZONE_MOVABLE pages. The
2613. * assumption is made that zones within a node are ordered by monotonically
2614. * increasing memory addresses so that the "highest" populated zone is used.
  2615. */
  2616. void __init find_usable_zone_for_movable(void)
  2617. {
  2618. int zone_index;
  2619. for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
  2620. if (zone_index == ZONE_MOVABLE)
  2621. continue;
  2622. if (arch_zone_highest_possible_pfn[zone_index] >
  2623. arch_zone_lowest_possible_pfn[zone_index])
  2624. break;
  2625. }
  2626. VM_BUG_ON(zone_index == -1);
  2627. movable_zone = zone_index;
  2628. }
  2629. /*
  2630. * The zone ranges provided by the architecture do not include ZONE_MOVABLE
2631. * because it is sized independent of architecture. Unlike the other zones,
  2632. * the starting point for ZONE_MOVABLE is not fixed. It may be different
  2633. * in each node depending on the size of each node and how evenly kernelcore
  2634. * is distributed. This helper function adjusts the zone ranges
  2635. * provided by the architecture for a given node by using the end of the
  2636. * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
2637. * zones within a node are ordered by monotonically increasing memory addresses.
  2638. */
  2639. void __meminit adjust_zone_range_for_zone_movable(int nid,
  2640. unsigned long zone_type,
  2641. unsigned long node_start_pfn,
  2642. unsigned long node_end_pfn,
  2643. unsigned long *zone_start_pfn,
  2644. unsigned long *zone_end_pfn)
  2645. {
  2646. /* Only adjust if ZONE_MOVABLE is on this node */
  2647. if (zone_movable_pfn[nid]) {
  2648. /* Size ZONE_MOVABLE */
  2649. if (zone_type == ZONE_MOVABLE) {
  2650. *zone_start_pfn = zone_movable_pfn[nid];
  2651. *zone_end_pfn = min(node_end_pfn,
  2652. arch_zone_highest_possible_pfn[movable_zone]);
  2653. /* Adjust for ZONE_MOVABLE starting within this range */
  2654. } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
  2655. *zone_end_pfn > zone_movable_pfn[nid]) {
  2656. *zone_end_pfn = zone_movable_pfn[nid];
  2657. /* Check if this whole range is within ZONE_MOVABLE */
  2658. } else if (*zone_start_pfn >= zone_movable_pfn[nid])
  2659. *zone_start_pfn = *zone_end_pfn;
  2660. }
  2661. }
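/*
 * Illustrative sketch (not part of the original file): the truncation case
 * above, replayed standalone with made-up PFNs. A zone spanning
 * [0x1000, 0x8000) on a node whose ZONE_MOVABLE begins at 0x4000 is clipped
 * to [0x1000, 0x4000); the pages above 0x4000 are accounted to ZONE_MOVABLE
 * instead.
 */
static void example_movable_clip(void)
{
	unsigned long zone_start = 0x1000, zone_end = 0x8000;
	unsigned long movable_start = 0x4000; /* zone_movable_pfn[nid] for this node */

	if (zone_start < movable_start && zone_end > movable_start)
		zone_end = movable_start;     /* zone now spans [0x1000, 0x4000) */
	else if (zone_start >= movable_start)
		zone_start = zone_end;        /* zone lies entirely in ZONE_MOVABLE */
	(void)zone_start;
	(void)zone_end;
}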
  2662. /*
  2663. * Return the number of pages a zone spans in a node, including holes
  2664. * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  2665. */
  2666. static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  2667. unsigned long zone_type,
  2668. unsigned long *ignored)
  2669. {
  2670. unsigned long node_start_pfn, node_end_pfn;
  2671. unsigned long zone_start_pfn, zone_end_pfn;
  2672. /* Get the start and end of the node and zone */
  2673. get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
  2674. zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
  2675. zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
  2676. adjust_zone_range_for_zone_movable(nid, zone_type,
  2677. node_start_pfn, node_end_pfn,
  2678. &zone_start_pfn, &zone_end_pfn);
  2679. /* Check that this node has pages within the zone's required range */
  2680. if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
  2681. return 0;
  2682. /* Move the zone boundaries inside the node if necessary */
  2683. zone_end_pfn = min(zone_end_pfn, node_end_pfn);
  2684. zone_start_pfn = max(zone_start_pfn, node_start_pfn);
  2685. /* Return the spanned pages */
  2686. return zone_end_pfn - zone_start_pfn;
  2687. }
  2688. /*
  2689. * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  2690. * then all holes in the requested range will be accounted for.
  2691. */
  2692. unsigned long __meminit __absent_pages_in_range(int nid,
  2693. unsigned long range_start_pfn,
  2694. unsigned long range_end_pfn)
  2695. {
  2696. int i = 0;
  2697. unsigned long prev_end_pfn = 0, hole_pages = 0;
  2698. unsigned long start_pfn;
  2699. /* Find the end_pfn of the first active range of pfns in the node */
  2700. i = first_active_region_index_in_nid(nid);
  2701. if (i == -1)
  2702. return 0;
  2703. prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
  2704. /* Account for ranges before physical memory on this node */
  2705. if (early_node_map[i].start_pfn > range_start_pfn)
  2706. hole_pages = prev_end_pfn - range_start_pfn;
  2707. /* Find all holes for the zone within the node */
  2708. for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
  2709. /* No need to continue if prev_end_pfn is outside the zone */
  2710. if (prev_end_pfn >= range_end_pfn)
  2711. break;
  2712. /* Make sure the end of the zone is not within the hole */
  2713. start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
  2714. prev_end_pfn = max(prev_end_pfn, range_start_pfn);
2715. /* Update the hole size count and move on */
  2716. if (start_pfn > range_start_pfn) {
  2717. BUG_ON(prev_end_pfn > start_pfn);
  2718. hole_pages += start_pfn - prev_end_pfn;
  2719. }
  2720. prev_end_pfn = early_node_map[i].end_pfn;
  2721. }
  2722. /* Account for ranges past physical memory on this node */
  2723. if (range_end_pfn > prev_end_pfn)
  2724. hole_pages += range_end_pfn -
  2725. max(range_start_pfn, prev_end_pfn);
  2726. return hole_pages;
  2727. }
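/*
 * Illustrative sketch (not part of the original file): the hole accounting
 * above replayed standalone on made-up ranges. A node with active PFN ranges
 * [100, 200) and [300, 400), queried over [0, 500), has 100 + 100 + 100 = 300
 * absent pages, i.e. example_absent_pages() == 300.
 */
static unsigned long example_absent_pages(void)
{
	const unsigned long range_start = 0, range_end = 500;
	const unsigned long starts[] = { 100, 300 }, ends[] = { 200, 400 };
	unsigned long prev_end = starts[0] < range_end ? starts[0] : range_end;
	unsigned long holes = prev_end - range_start;  /* hole before the first range */
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long start = starts[i] < range_end ? starts[i] : range_end;
		if (prev_end >= range_end)
			break;
		if (start > range_start)
			holes += start - prev_end;     /* hole between active ranges */
		prev_end = ends[i];
	}
	if (range_end > prev_end)
		holes += range_end - prev_end;         /* hole after the last range */
	return holes;
}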
  2728. /**
  2729. * absent_pages_in_range - Return number of page frames in holes within a range
  2730. * @start_pfn: The start PFN to start searching for holes
  2731. * @end_pfn: The end PFN to stop searching for holes
  2732. *
2733. * It returns the number of page frames in memory holes within a range.
  2734. */
  2735. unsigned long __init absent_pages_in_range(unsigned long start_pfn,
  2736. unsigned long end_pfn)
  2737. {
  2738. return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
  2739. }
  2740. /* Return the number of page frames in holes in a zone on a node */
  2741. static unsigned long __meminit zone_absent_pages_in_node(int nid,
  2742. unsigned long zone_type,
  2743. unsigned long *ignored)
  2744. {
  2745. unsigned long node_start_pfn, node_end_pfn;
  2746. unsigned long zone_start_pfn, zone_end_pfn;
  2747. get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
  2748. zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
  2749. node_start_pfn);
  2750. zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
  2751. node_end_pfn);
  2752. adjust_zone_range_for_zone_movable(nid, zone_type,
  2753. node_start_pfn, node_end_pfn,
  2754. &zone_start_pfn, &zone_end_pfn);
  2755. return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
  2756. }
  2757. #else
  2758. static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
  2759. unsigned long zone_type,
  2760. unsigned long *zones_size)
  2761. {
  2762. return zones_size[zone_type];
  2763. }
  2764. static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
  2765. unsigned long zone_type,
  2766. unsigned long *zholes_size)
  2767. {
  2768. if (!zholes_size)
  2769. return 0;
  2770. return zholes_size[zone_type];
  2771. }
  2772. #endif
  2773. static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
  2774. unsigned long *zones_size, unsigned long *zholes_size)
  2775. {
  2776. unsigned long realtotalpages, totalpages = 0;
  2777. enum zone_type i;
  2778. for (i = 0; i < MAX_NR_ZONES; i++)
  2779. totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
  2780. zones_size);
  2781. pgdat->node_spanned_pages = totalpages;
  2782. realtotalpages = totalpages;
  2783. for (i = 0; i < MAX_NR_ZONES; i++)
  2784. realtotalpages -=
  2785. zone_absent_pages_in_node(pgdat->node_id, i,
  2786. zholes_size);
  2787. pgdat->node_present_pages = realtotalpages;
  2788. printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
  2789. realtotalpages);
  2790. }
  2791. #ifndef CONFIG_SPARSEMEM
  2792. /*
2793. * Calculate the size of the zone->pageblock_flags bitmap in bytes, rounded
2794. * to an unsigned long. Start by rounding zonesize up to a multiple of
2795. * MAX_ORDER_NR_PAGES, then allow NR_PAGEBLOCK_BITS bits per MAX_ORDER-1
2796. * block of pages, round the bit count up to the nearest unsigned long, and
2797. * return the result in bytes.
  2798. */
  2799. static unsigned long __init usemap_size(unsigned long zonesize)
  2800. {
  2801. unsigned long usemapsize;
  2802. usemapsize = roundup(zonesize, MAX_ORDER_NR_PAGES);
  2803. usemapsize = usemapsize >> (MAX_ORDER-1);
  2804. usemapsize *= NR_PAGEBLOCK_BITS;
  2805. usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
  2806. return usemapsize / 8;
  2807. }
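/*
 * Illustrative sketch (not part of the original file): the sizing arithmetic
 * above with made-up example values. MAX_ORDER_NR_PAGES and NR_PAGEBLOCK_BITS
 * are taken as 1024 and 4 here purely for illustration; the real values are
 * configuration dependent.
 */
static unsigned long example_usemap_bytes(unsigned long zonesize_pages)
{
	const unsigned long block_pages = 1024;   /* stand-in for MAX_ORDER_NR_PAGES */
	const unsigned long bits_per_block = 4;   /* stand-in for NR_PAGEBLOCK_BITS */
	const unsigned long long_bits = 8 * sizeof(unsigned long);
	unsigned long blocks, bits;

	blocks = (zonesize_pages + block_pages - 1) / block_pages;
	bits = blocks * bits_per_block;
	bits = (bits + long_bits - 1) / long_bits * long_bits; /* round up to whole longs */
	return bits / 8; /* e.g. 1048576 pages -> 1024 blocks -> 4096 bits -> 512 bytes */
}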
  2808. static void __init setup_usemap(struct pglist_data *pgdat,
  2809. struct zone *zone, unsigned long zonesize)
  2810. {
  2811. unsigned long usemapsize = usemap_size(zonesize);
  2812. zone->pageblock_flags = NULL;
  2813. if (usemapsize) {
  2814. zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
  2815. memset(zone->pageblock_flags, 0, usemapsize);
  2816. }
  2817. }
  2818. #else
2819. static inline void setup_usemap(struct pglist_data *pgdat,
  2820. struct zone *zone, unsigned long zonesize) {}
  2821. #endif /* CONFIG_SPARSEMEM */
  2822. /*
  2823. * Set up the zone data structures:
  2824. * - mark all pages reserved
  2825. * - mark all memory queues empty
  2826. * - clear the memory bitmaps
  2827. */
  2828. static void __meminit free_area_init_core(struct pglist_data *pgdat,
  2829. unsigned long *zones_size, unsigned long *zholes_size)
  2830. {
  2831. enum zone_type j;
  2832. int nid = pgdat->node_id;
  2833. unsigned long zone_start_pfn = pgdat->node_start_pfn;
  2834. int ret;
  2835. pgdat_resize_init(pgdat);
  2836. pgdat->nr_zones = 0;
  2837. init_waitqueue_head(&pgdat->kswapd_wait);
  2838. pgdat->kswapd_max_order = 0;
  2839. for (j = 0; j < MAX_NR_ZONES; j++) {
  2840. struct zone *zone = pgdat->node_zones + j;
  2841. unsigned long size, realsize, memmap_pages;
  2842. size = zone_spanned_pages_in_node(nid, j, zones_size);
  2843. realsize = size - zone_absent_pages_in_node(nid, j,
  2844. zholes_size);
  2845. /*
  2846. * Adjust realsize so that it accounts for how much memory
  2847. * is used by this zone for memmap. This affects the watermark
  2848. * and per-cpu initialisations
  2849. */
  2850. memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
  2851. if (realsize >= memmap_pages) {
  2852. realsize -= memmap_pages;
  2853. printk(KERN_DEBUG
  2854. " %s zone: %lu pages used for memmap\n",
  2855. zone_names[j], memmap_pages);
  2856. } else
  2857. printk(KERN_WARNING
  2858. " %s zone: %lu pages exceeds realsize %lu\n",
  2859. zone_names[j], memmap_pages, realsize);
  2860. /* Account for reserved pages */
  2861. if (j == 0 && realsize > dma_reserve) {
  2862. realsize -= dma_reserve;
  2863. printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
  2864. zone_names[0], dma_reserve);
  2865. }
  2866. if (!is_highmem_idx(j))
  2867. nr_kernel_pages += realsize;
  2868. nr_all_pages += realsize;
  2869. zone->spanned_pages = size;
  2870. zone->present_pages = realsize;
  2871. #ifdef CONFIG_NUMA
  2872. zone->node = nid;
  2873. zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
  2874. / 100;
  2875. zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
  2876. #endif
  2877. zone->name = zone_names[j];
  2878. spin_lock_init(&zone->lock);
  2879. spin_lock_init(&zone->lru_lock);
  2880. zone_seqlock_init(zone);
  2881. zone->zone_pgdat = pgdat;
  2882. zone->prev_priority = DEF_PRIORITY;
  2883. zone_pcp_init(zone);
  2884. INIT_LIST_HEAD(&zone->active_list);
  2885. INIT_LIST_HEAD(&zone->inactive_list);
  2886. zone->nr_scan_active = 0;
  2887. zone->nr_scan_inactive = 0;
  2888. zap_zone_vm_stats(zone);
  2889. atomic_set(&zone->reclaim_in_progress, 0);
  2890. if (!size)
  2891. continue;
  2892. setup_usemap(pgdat, zone, size);
  2893. ret = init_currently_empty_zone(zone, zone_start_pfn,
  2894. size, MEMMAP_EARLY);
  2895. BUG_ON(ret);
  2896. zone_start_pfn += size;
  2897. }
  2898. }
  2899. static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
  2900. {
  2901. /* Skip empty nodes */
  2902. if (!pgdat->node_spanned_pages)
  2903. return;
  2904. #ifdef CONFIG_FLAT_NODE_MEM_MAP
  2905. /* ia64 gets its own node_mem_map, before this, without bootmem */
  2906. if (!pgdat->node_mem_map) {
  2907. unsigned long size, start, end;
  2908. struct page *map;
  2909. /*
2910. * The zone's endpoints aren't required to be MAX_ORDER
2911. * aligned, but the node_mem_map endpoints must be MAX_ORDER
2912. * aligned for the buddy allocator to function correctly.
  2913. */
  2914. start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
  2915. end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
  2916. end = ALIGN(end, MAX_ORDER_NR_PAGES);
  2917. size = (end - start) * sizeof(struct page);
  2918. map = alloc_remap(pgdat->node_id, size);
  2919. if (!map)
  2920. map = alloc_bootmem_node(pgdat, size);
  2921. pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
  2922. }
  2923. #ifndef CONFIG_NEED_MULTIPLE_NODES
  2924. /*
  2925. * With no DISCONTIG, the global mem_map is just set as node 0's
  2926. */
  2927. if (pgdat == NODE_DATA(0)) {
  2928. mem_map = NODE_DATA(0)->node_mem_map;
  2929. #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  2930. if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
  2931. mem_map -= pgdat->node_start_pfn;
  2932. #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
  2933. }
  2934. #endif
  2935. #endif /* CONFIG_FLAT_NODE_MEM_MAP */
  2936. }
  2937. void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
  2938. unsigned long *zones_size, unsigned long node_start_pfn,
  2939. unsigned long *zholes_size)
  2940. {
  2941. pgdat->node_id = nid;
  2942. pgdat->node_start_pfn = node_start_pfn;
  2943. calculate_node_totalpages(pgdat, zones_size, zholes_size);
  2944. alloc_node_mem_map(pgdat);
  2945. free_area_init_core(pgdat, zones_size, zholes_size);
  2946. }
  2947. #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  2948. #if MAX_NUMNODES > 1
  2949. /*
  2950. * Figure out the number of possible node ids.
  2951. */
  2952. static void __init setup_nr_node_ids(void)
  2953. {
  2954. unsigned int node;
  2955. unsigned int highest = 0;
  2956. for_each_node_mask(node, node_possible_map)
  2957. highest = node;
  2958. nr_node_ids = highest + 1;
  2959. }
  2960. #else
  2961. static inline void setup_nr_node_ids(void)
  2962. {
  2963. }
  2964. #endif
  2965. /**
  2966. * add_active_range - Register a range of PFNs backed by physical memory
  2967. * @nid: The node ID the range resides on
  2968. * @start_pfn: The start PFN of the available physical memory
  2969. * @end_pfn: The end PFN of the available physical memory
  2970. *
  2971. * These ranges are stored in an early_node_map[] and later used by
  2972. * free_area_init_nodes() to calculate zone sizes and holes. If the
  2973. * range spans a memory hole, it is up to the architecture to ensure
  2974. * the memory is not freed by the bootmem allocator. If possible
  2975. * the range being registered will be merged with existing ranges.
  2976. */
  2977. void __init add_active_range(unsigned int nid, unsigned long start_pfn,
  2978. unsigned long end_pfn)
  2979. {
  2980. int i;
  2981. printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
  2982. "%d entries of %d used\n",
  2983. nid, start_pfn, end_pfn,
  2984. nr_nodemap_entries, MAX_ACTIVE_REGIONS);
  2985. /* Merge with existing active regions if possible */
  2986. for (i = 0; i < nr_nodemap_entries; i++) {
  2987. if (early_node_map[i].nid != nid)
  2988. continue;
  2989. /* Skip if an existing region covers this new one */
  2990. if (start_pfn >= early_node_map[i].start_pfn &&
  2991. end_pfn <= early_node_map[i].end_pfn)
  2992. return;
  2993. /* Merge forward if suitable */
  2994. if (start_pfn <= early_node_map[i].end_pfn &&
  2995. end_pfn > early_node_map[i].end_pfn) {
  2996. early_node_map[i].end_pfn = end_pfn;
  2997. return;
  2998. }
  2999. /* Merge backward if suitable */
  3000. if (start_pfn < early_node_map[i].end_pfn &&
  3001. end_pfn >= early_node_map[i].start_pfn) {
  3002. early_node_map[i].start_pfn = start_pfn;
  3003. return;
  3004. }
  3005. }
  3006. /* Check that early_node_map is large enough */
  3007. if (i >= MAX_ACTIVE_REGIONS) {
  3008. printk(KERN_CRIT "More than %d memory regions, truncating\n",
  3009. MAX_ACTIVE_REGIONS);
  3010. return;
  3011. }
  3012. early_node_map[i].nid = nid;
  3013. early_node_map[i].start_pfn = start_pfn;
  3014. early_node_map[i].end_pfn = end_pfn;
  3015. nr_nodemap_entries = i + 1;
  3016. }
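/*
 * Illustrative sketch (not part of the original file): how an architecture's
 * early setup code might register memory and how the merging above behaves.
 * The PFN values and the helper name are made up for illustration.
 */
static void __init example_register_node0_memory(void)
{
	/* First registration creates a new early_node_map[] entry: [0x100, 0x800) */
	add_active_range(0, 0x100, 0x800);

	/* Overlaps the end of that entry, so it merges forward instead of
	 * consuming a second entry; the entry becomes [0x100, 0xa00) */
	add_active_range(0, 0x700, 0xa00);

	/* Fully covered by the existing entry, so it is ignored */
	add_active_range(0, 0x200, 0x300);
}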
  3017. /**
  3018. * shrink_active_range - Shrink an existing registered range of PFNs
  3019. * @nid: The node id the range is on that should be shrunk
  3020. * @old_end_pfn: The old end PFN of the range
3021. * @new_end_pfn: The new end PFN of the range
  3022. *
3023. * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
3024. * The map is kept at the end of the physical page range that has already been
  3025. * registered with add_active_range(). This function allows an arch to shrink
  3026. * an existing registered range.
  3027. */
  3028. void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
  3029. unsigned long new_end_pfn)
  3030. {
  3031. int i;
  3032. /* Find the old active region end and shrink */
  3033. for_each_active_range_index_in_nid(i, nid)
  3034. if (early_node_map[i].end_pfn == old_end_pfn) {
  3035. early_node_map[i].end_pfn = new_end_pfn;
  3036. break;
  3037. }
  3038. }
  3039. /**
  3040. * remove_all_active_ranges - Remove all currently registered regions
  3041. *
  3042. * During discovery, it may be found that a table like SRAT is invalid
  3043. * and an alternative discovery method must be used. This function removes
  3044. * all currently registered regions.
  3045. */
  3046. void __init remove_all_active_ranges(void)
  3047. {
  3048. memset(early_node_map, 0, sizeof(early_node_map));
  3049. nr_nodemap_entries = 0;
  3050. #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  3051. memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
  3052. memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
  3053. #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
  3054. }
  3055. /* Compare two active node_active_regions */
  3056. static int __init cmp_node_active_region(const void *a, const void *b)
  3057. {
  3058. struct node_active_region *arange = (struct node_active_region *)a;
  3059. struct node_active_region *brange = (struct node_active_region *)b;
  3060. /* Done this way to avoid overflows */
  3061. if (arange->start_pfn > brange->start_pfn)
  3062. return 1;
  3063. if (arange->start_pfn < brange->start_pfn)
  3064. return -1;
  3065. return 0;
  3066. }
  3067. /* sort the node_map by start_pfn */
  3068. static void __init sort_node_map(void)
  3069. {
  3070. sort(early_node_map, (size_t)nr_nodemap_entries,
  3071. sizeof(struct node_active_region),
  3072. cmp_node_active_region, NULL);
  3073. }
  3074. /* Find the lowest pfn for a node */
  3075. unsigned long __init find_min_pfn_for_node(unsigned long nid)
  3076. {
  3077. int i;
  3078. unsigned long min_pfn = ULONG_MAX;
  3079. /* Assuming a sorted map, the first range found has the starting pfn */
  3080. for_each_active_range_index_in_nid(i, nid)
  3081. min_pfn = min(min_pfn, early_node_map[i].start_pfn);
  3082. if (min_pfn == ULONG_MAX) {
  3083. printk(KERN_WARNING
  3084. "Could not find start_pfn for node %lu\n", nid);
  3085. return 0;
  3086. }
  3087. return min_pfn;
  3088. }
  3089. /**
  3090. * find_min_pfn_with_active_regions - Find the minimum PFN registered
  3091. *
  3092. * It returns the minimum PFN based on information provided via
  3093. * add_active_range().
  3094. */
  3095. unsigned long __init find_min_pfn_with_active_regions(void)
  3096. {
  3097. return find_min_pfn_for_node(MAX_NUMNODES);
  3098. }
  3099. /**
  3100. * find_max_pfn_with_active_regions - Find the maximum PFN registered
  3101. *
  3102. * It returns the maximum PFN based on information provided via
  3103. * add_active_range().
  3104. */
  3105. unsigned long __init find_max_pfn_with_active_regions(void)
  3106. {
  3107. int i;
  3108. unsigned long max_pfn = 0;
  3109. for (i = 0; i < nr_nodemap_entries; i++)
  3110. max_pfn = max(max_pfn, early_node_map[i].end_pfn);
  3111. return max_pfn;
  3112. }
  3113. /*
  3114. * early_calculate_totalpages()
  3115. * Sum pages in active regions for movable zone.
  3116. * Populate N_HIGH_MEMORY for calculating usable_nodes.
  3117. */
  3118. unsigned long __init early_calculate_totalpages(void)
  3119. {
  3120. int i;
  3121. unsigned long totalpages = 0;
  3122. for (i = 0; i < nr_nodemap_entries; i++) {
  3123. unsigned long pages = early_node_map[i].end_pfn -
  3124. early_node_map[i].start_pfn;
  3125. totalpages += pages;
  3126. if (pages)
  3127. node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
  3128. }
  3129. return totalpages;
  3130. }
  3131. /*
3132. * Find the PFN at which ZONE_MOVABLE begins in each node. Kernel memory
3133. * is spread evenly between nodes as long as the nodes have enough
3134. * memory. When they don't, some nodes will have more kernelcore than
3135. * others.
  3136. */
  3137. void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
  3138. {
  3139. int i, nid;
  3140. unsigned long usable_startpfn;
  3141. unsigned long kernelcore_node, kernelcore_remaining;
  3142. unsigned long totalpages = early_calculate_totalpages();
  3143. int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
  3144. /*
  3145. * If movablecore was specified, calculate what size of
  3146. * kernelcore that corresponds so that memory usable for
  3147. * any allocation type is evenly spread. If both kernelcore
  3148. * and movablecore are specified, then the value of kernelcore
  3149. * will be used for required_kernelcore if it's greater than
  3150. * what movablecore would have allowed.
  3151. */
  3152. if (required_movablecore) {
  3153. unsigned long corepages;
  3154. /*
  3155. * Round-up so that ZONE_MOVABLE is at least as large as what
  3156. * was requested by the user
  3157. */
  3158. required_movablecore =
  3159. roundup(required_movablecore, MAX_ORDER_NR_PAGES);
  3160. corepages = totalpages - required_movablecore;
  3161. required_kernelcore = max(required_kernelcore, corepages);
  3162. }
  3163. /* If kernelcore was not specified, there is no ZONE_MOVABLE */
  3164. if (!required_kernelcore)
  3165. return;
  3166. /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
  3167. find_usable_zone_for_movable();
  3168. usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
  3169. restart:
  3170. /* Spread kernelcore memory as evenly as possible throughout nodes */
  3171. kernelcore_node = required_kernelcore / usable_nodes;
  3172. for_each_node_state(nid, N_HIGH_MEMORY) {
  3173. /*
  3174. * Recalculate kernelcore_node if the division per node
  3175. * now exceeds what is necessary to satisfy the requested
  3176. * amount of memory for the kernel
  3177. */
  3178. if (required_kernelcore < kernelcore_node)
  3179. kernelcore_node = required_kernelcore / usable_nodes;
  3180. /*
  3181. * As the map is walked, we track how much memory is usable
  3182. * by the kernel using kernelcore_remaining. When it is
  3183. * 0, the rest of the node is usable by ZONE_MOVABLE
  3184. */
  3185. kernelcore_remaining = kernelcore_node;
  3186. /* Go through each range of PFNs within this node */
  3187. for_each_active_range_index_in_nid(i, nid) {
  3188. unsigned long start_pfn, end_pfn;
  3189. unsigned long size_pages;
  3190. start_pfn = max(early_node_map[i].start_pfn,
  3191. zone_movable_pfn[nid]);
  3192. end_pfn = early_node_map[i].end_pfn;
  3193. if (start_pfn >= end_pfn)
  3194. continue;
  3195. /* Account for what is only usable for kernelcore */
  3196. if (start_pfn < usable_startpfn) {
  3197. unsigned long kernel_pages;
  3198. kernel_pages = min(end_pfn, usable_startpfn)
  3199. - start_pfn;
  3200. kernelcore_remaining -= min(kernel_pages,
  3201. kernelcore_remaining);
  3202. required_kernelcore -= min(kernel_pages,
  3203. required_kernelcore);
  3204. /* Continue if range is now fully accounted */
  3205. if (end_pfn <= usable_startpfn) {
  3206. /*
  3207. * Push zone_movable_pfn to the end so
  3208. * that if we have to rebalance
  3209. * kernelcore across nodes, we will
  3210. * not double account here
  3211. */
  3212. zone_movable_pfn[nid] = end_pfn;
  3213. continue;
  3214. }
  3215. start_pfn = usable_startpfn;
  3216. }
  3217. /*
  3218. * The usable PFN range for ZONE_MOVABLE is from
  3219. * start_pfn->end_pfn. Calculate size_pages as the
  3220. * number of pages used as kernelcore
  3221. */
  3222. size_pages = end_pfn - start_pfn;
  3223. if (size_pages > kernelcore_remaining)
  3224. size_pages = kernelcore_remaining;
  3225. zone_movable_pfn[nid] = start_pfn + size_pages;
  3226. /*
3227. * Some kernelcore has been accounted for; update the counts and
3228. * break if the kernelcore for this node has been
3229. * satisfied
  3230. */
  3231. required_kernelcore -= min(required_kernelcore,
  3232. size_pages);
  3233. kernelcore_remaining -= size_pages;
  3234. if (!kernelcore_remaining)
  3235. break;
  3236. }
  3237. }
  3238. /*
  3239. * If there is still required_kernelcore, we do another pass with one
  3240. * less node in the count. This will push zone_movable_pfn[nid] further
  3241. * along on the nodes that still have memory until kernelcore is
3242. * satisfied
  3243. */
  3244. usable_nodes--;
  3245. if (usable_nodes && required_kernelcore > usable_nodes)
  3246. goto restart;
  3247. /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
  3248. for (nid = 0; nid < MAX_NUMNODES; nid++)
  3249. zone_movable_pfn[nid] =
  3250. roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
  3251. }
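/*
 * Illustrative sketch (not part of the original file): a much simplified,
 * standalone version of the spreading loop above for a made-up two-node
 * machine, ignoring usable_startpfn, holes and the MAX_ORDER rounding.
 * Node 0 has 1024 pages, node 1 has 3072, and 2048 pages of kernelcore are
 * requested: node 0 ends up entirely kernelcore and node 1 contributes the
 * other 1024, leaving 2048 pages of ZONE_MOVABLE on node 1.
 */
static void example_spread_kernelcore(void)
{
	unsigned long node_pages[2] = { 1024, 3072 };
	unsigned long movable_offset[2];   /* where ZONE_MOVABLE starts in each node */
	unsigned long required = 2048;
	int usable_nodes = 2, nid;
	unsigned long per_node = required / usable_nodes;

	for (nid = 0; nid < 2; nid++) {
		unsigned long take = per_node < node_pages[nid] ?
					per_node : node_pages[nid];
		movable_offset[nid] = take;    /* pages below this stay kernelcore */
		required -= take;
	}
	/* movable_offset == { 1024, 1024 }: node 0 has no ZONE_MOVABLE pages,
	 * node 1 has 3072 - 1024 = 2048 movable pages, and required == 0. */
	(void)movable_offset;
	(void)required;
}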
3252. /* Any regular memory on that node? */
  3253. static void check_for_regular_memory(pg_data_t *pgdat)
  3254. {
  3255. #ifdef CONFIG_HIGHMEM
  3256. enum zone_type zone_type;
  3257. for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
  3258. struct zone *zone = &pgdat->node_zones[zone_type];
  3259. if (zone->present_pages)
  3260. node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
  3261. }
  3262. #endif
  3263. }
  3264. /**
  3265. * free_area_init_nodes - Initialise all pg_data_t and zone data
  3266. * @max_zone_pfn: an array of max PFNs for each zone
  3267. *
  3268. * This will call free_area_init_node() for each active node in the system.
  3269. * Using the page ranges provided by add_active_range(), the size of each
3270. * zone in each node and of its holes is calculated. If the maximum PFNs of
3271. * two adjacent zones match, it is assumed that the higher zone is empty.
  3272. * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
  3273. * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
  3274. * starts where the previous one ended. For example, ZONE_DMA32 starts
  3275. * at arch_max_dma_pfn.
  3276. */
  3277. void __init free_area_init_nodes(unsigned long *max_zone_pfn)
  3278. {
  3279. unsigned long nid;
  3280. enum zone_type i;
  3281. /* Sort early_node_map as initialisation assumes it is sorted */
  3282. sort_node_map();
  3283. /* Record where the zone boundaries are */
  3284. memset(arch_zone_lowest_possible_pfn, 0,
  3285. sizeof(arch_zone_lowest_possible_pfn));
  3286. memset(arch_zone_highest_possible_pfn, 0,
  3287. sizeof(arch_zone_highest_possible_pfn));
  3288. arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
  3289. arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
  3290. for (i = 1; i < MAX_NR_ZONES; i++) {
  3291. if (i == ZONE_MOVABLE)
  3292. continue;
  3293. arch_zone_lowest_possible_pfn[i] =
  3294. arch_zone_highest_possible_pfn[i-1];
  3295. arch_zone_highest_possible_pfn[i] =
  3296. max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
  3297. }
  3298. arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
  3299. arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
  3300. /* Find the PFNs that ZONE_MOVABLE begins at in each node */
  3301. memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
  3302. find_zone_movable_pfns_for_nodes(zone_movable_pfn);
  3303. /* Print out the zone ranges */
  3304. printk("Zone PFN ranges:\n");
  3305. for (i = 0; i < MAX_NR_ZONES; i++) {
  3306. if (i == ZONE_MOVABLE)
  3307. continue;
  3308. printk(" %-8s %8lu -> %8lu\n",
  3309. zone_names[i],
  3310. arch_zone_lowest_possible_pfn[i],
  3311. arch_zone_highest_possible_pfn[i]);
  3312. }
  3313. /* Print out the PFNs ZONE_MOVABLE begins at in each node */
  3314. printk("Movable zone start PFN for each node\n");
  3315. for (i = 0; i < MAX_NUMNODES; i++) {
  3316. if (zone_movable_pfn[i])
  3317. printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
  3318. }
  3319. /* Print out the early_node_map[] */
  3320. printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
  3321. for (i = 0; i < nr_nodemap_entries; i++)
  3322. printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid,
  3323. early_node_map[i].start_pfn,
  3324. early_node_map[i].end_pfn);
  3325. /* Initialise every node */
  3326. setup_nr_node_ids();
  3327. for_each_online_node(nid) {
  3328. pg_data_t *pgdat = NODE_DATA(nid);
  3329. free_area_init_node(nid, pgdat, NULL,
  3330. find_min_pfn_for_node(nid), NULL);
  3331. /* Any memory on that node */
  3332. if (pgdat->node_present_pages)
  3333. node_set_state(nid, N_HIGH_MEMORY);
  3334. check_for_regular_memory(pgdat);
  3335. }
  3336. }
  3337. static int __init cmdline_parse_core(char *p, unsigned long *core)
  3338. {
  3339. unsigned long long coremem;
  3340. if (!p)
  3341. return -EINVAL;
  3342. coremem = memparse(p, &p);
  3343. *core = coremem >> PAGE_SHIFT;
  3344. /* Paranoid check that UL is enough for the coremem value */
  3345. WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
  3346. return 0;
  3347. }
  3348. /*
  3349. * kernelcore=size sets the amount of memory for use for allocations that
  3350. * cannot be reclaimed or migrated.
  3351. */
  3352. static int __init cmdline_parse_kernelcore(char *p)
  3353. {
  3354. return cmdline_parse_core(p, &required_kernelcore);
  3355. }
  3356. /*
  3357. * movablecore=size sets the amount of memory for use for allocations that
  3358. * can be reclaimed or migrated.
  3359. */
  3360. static int __init cmdline_parse_movablecore(char *p)
  3361. {
  3362. return cmdline_parse_core(p, &required_movablecore);
  3363. }
  3364. early_param("kernelcore", cmdline_parse_kernelcore);
  3365. early_param("movablecore", cmdline_parse_movablecore);
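/*
 * Illustrative sketch (not part of the original file): what the parsers above
 * compute for a command line such as "kernelcore=512M movablecore=2G",
 * assuming 4KiB pages (PAGE_SHIFT == 12) purely for the arithmetic.
 */
static void example_core_parsing(void)
{
	unsigned long long kernelcore_bytes = 512ULL << 20;   /* memparse("512M") */
	unsigned long long movablecore_bytes = 2ULL << 30;    /* memparse("2G") */
	unsigned long kernelcore_pages = kernelcore_bytes >> 12;   /* 131072 pages */
	unsigned long movablecore_pages = movablecore_bytes >> 12; /* 524288 pages */

	(void)kernelcore_pages;
	(void)movablecore_pages;
}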
  3366. #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
  3367. /**
  3368. * set_dma_reserve - set the specified number of pages reserved in the first zone
  3369. * @new_dma_reserve: The number of pages to mark reserved
  3370. *
  3371. * The per-cpu batchsize and zone watermarks are determined by present_pages.
  3372. * In the DMA zone, a significant percentage may be consumed by kernel image
  3373. * and other unfreeable allocations which can skew the watermarks badly. This
  3374. * function may optionally be used to account for unfreeable pages in the
  3375. * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
  3376. * smaller per-cpu batchsize.
  3377. */
  3378. void __init set_dma_reserve(unsigned long new_dma_reserve)
  3379. {
  3380. dma_reserve = new_dma_reserve;
  3381. }
  3382. #ifndef CONFIG_NEED_MULTIPLE_NODES
  3383. static bootmem_data_t contig_bootmem_data;
  3384. struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
  3385. EXPORT_SYMBOL(contig_page_data);
  3386. #endif
  3387. void __init free_area_init(unsigned long *zones_size)
  3388. {
  3389. free_area_init_node(0, NODE_DATA(0), zones_size,
  3390. __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
  3391. }
  3392. static int page_alloc_cpu_notify(struct notifier_block *self,
  3393. unsigned long action, void *hcpu)
  3394. {
  3395. int cpu = (unsigned long)hcpu;
  3396. if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
  3397. local_irq_disable();
  3398. __drain_pages(cpu);
  3399. vm_events_fold_cpu(cpu);
  3400. local_irq_enable();
  3401. refresh_cpu_vm_stats(cpu);
  3402. }
  3403. return NOTIFY_OK;
  3404. }
  3405. void __init page_alloc_init(void)
  3406. {
  3407. hotcpu_notifier(page_alloc_cpu_notify, 0);
  3408. }
  3409. /*
3410. * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
  3411. * or min_free_kbytes changes.
  3412. */
  3413. static void calculate_totalreserve_pages(void)
  3414. {
  3415. struct pglist_data *pgdat;
  3416. unsigned long reserve_pages = 0;
  3417. enum zone_type i, j;
  3418. for_each_online_pgdat(pgdat) {
  3419. for (i = 0; i < MAX_NR_ZONES; i++) {
  3420. struct zone *zone = pgdat->node_zones + i;
  3421. unsigned long max = 0;
  3422. /* Find valid and maximum lowmem_reserve in the zone */
  3423. for (j = i; j < MAX_NR_ZONES; j++) {
  3424. if (zone->lowmem_reserve[j] > max)
  3425. max = zone->lowmem_reserve[j];
  3426. }
  3427. /* we treat pages_high as reserved pages. */
  3428. max += zone->pages_high;
  3429. if (max > zone->present_pages)
  3430. max = zone->present_pages;
  3431. reserve_pages += max;
  3432. }
  3433. }
  3434. totalreserve_pages = reserve_pages;
  3435. }
  3436. /*
  3437. * setup_per_zone_lowmem_reserve - called whenever
3438. * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
  3439. * has a correct pages reserved value, so an adequate number of
  3440. * pages are left in the zone after a successful __alloc_pages().
  3441. */
  3442. static void setup_per_zone_lowmem_reserve(void)
  3443. {
  3444. struct pglist_data *pgdat;
  3445. enum zone_type j, idx;
  3446. for_each_online_pgdat(pgdat) {
  3447. for (j = 0; j < MAX_NR_ZONES; j++) {
  3448. struct zone *zone = pgdat->node_zones + j;
  3449. unsigned long present_pages = zone->present_pages;
  3450. zone->lowmem_reserve[j] = 0;
  3451. idx = j;
  3452. while (idx) {
  3453. struct zone *lower_zone;
  3454. idx--;
  3455. if (sysctl_lowmem_reserve_ratio[idx] < 1)
  3456. sysctl_lowmem_reserve_ratio[idx] = 1;
  3457. lower_zone = pgdat->node_zones + idx;
  3458. lower_zone->lowmem_reserve[j] = present_pages /
  3459. sysctl_lowmem_reserve_ratio[idx];
  3460. present_pages += lower_zone->present_pages;
  3461. }
  3462. }
  3463. }
  3464. /* update totalreserve_pages */
  3465. calculate_totalreserve_pages();
  3466. }
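/*
 * Illustrative sketch (not part of the original file): the reserve arithmetic
 * above for a made-up DMA/NORMAL/HIGHMEM layout, using 256 and 32 as the
 * reserve ratios purely as example values.
 */
static void example_lowmem_reserve(void)
{
	enum { EX_DMA, EX_NORMAL, EX_HIGHMEM, EX_NR };
	unsigned long present[EX_NR] = { 4096, 225280, 786432 };
	unsigned long ratio[EX_NR] = { 256, 32, 0 };    /* ratio[EX_HIGHMEM] unused */
	unsigned long reserve[EX_NR][EX_NR] = { { 0 } };
	int j, idx;

	for (j = 0; j < EX_NR; j++) {
		unsigned long pages = present[j];
		for (idx = j - 1; idx >= 0; idx--) {
			reserve[idx][j] = pages / ratio[idx];
			pages += present[idx];
		}
	}
	/* reserve[EX_NORMAL][EX_HIGHMEM] == 786432 / 32 == 24576
	 * reserve[EX_DMA][EX_HIGHMEM]    == (786432 + 225280) / 256 == 3952
	 * reserve[EX_DMA][EX_NORMAL]     == 225280 / 256 == 880 */
	(void)reserve;
}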
  3467. /**
  3468. * setup_per_zone_pages_min - called when min_free_kbytes changes.
  3469. *
  3470. * Ensures that the pages_{min,low,high} values for each zone are set correctly
  3471. * with respect to min_free_kbytes.
  3472. */
  3473. void setup_per_zone_pages_min(void)
  3474. {
  3475. unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
  3476. unsigned long lowmem_pages = 0;
  3477. struct zone *zone;
  3478. unsigned long flags;
  3479. /* Calculate total number of !ZONE_HIGHMEM pages */
  3480. for_each_zone(zone) {
  3481. if (!is_highmem(zone))
  3482. lowmem_pages += zone->present_pages;
  3483. }
  3484. for_each_zone(zone) {
  3485. u64 tmp;
  3486. spin_lock_irqsave(&zone->lru_lock, flags);
  3487. tmp = (u64)pages_min * zone->present_pages;
  3488. do_div(tmp, lowmem_pages);
  3489. if (is_highmem(zone)) {
  3490. /*
  3491. * __GFP_HIGH and PF_MEMALLOC allocations usually don't
  3492. * need highmem pages, so cap pages_min to a small
  3493. * value here.
  3494. *
  3495. * The (pages_high-pages_low) and (pages_low-pages_min)
3496. * deltas control async page reclaim, and so should
  3497. * not be capped for highmem.
  3498. */
  3499. int min_pages;
  3500. min_pages = zone->present_pages / 1024;
  3501. if (min_pages < SWAP_CLUSTER_MAX)
  3502. min_pages = SWAP_CLUSTER_MAX;
  3503. if (min_pages > 128)
  3504. min_pages = 128;
  3505. zone->pages_min = min_pages;
  3506. } else {
  3507. /*
  3508. * If it's a lowmem zone, reserve a number of pages
  3509. * proportionate to the zone's size.
  3510. */
  3511. zone->pages_min = tmp;
  3512. }
  3513. zone->pages_low = zone->pages_min + (tmp >> 2);
  3514. zone->pages_high = zone->pages_min + (tmp >> 1);
  3515. spin_unlock_irqrestore(&zone->lru_lock, flags);
  3516. }
  3517. /* update totalreserve_pages */
  3518. calculate_totalreserve_pages();
  3519. }
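/*
 * Illustrative sketch (not part of the original file): the watermark split
 * above for a made-up machine with 4KiB pages, a single 1GiB lowmem zone and
 * min_free_kbytes == 4096.
 */
static void example_zone_watermarks(void)
{
	unsigned long sample_min_free_kbytes = 4096;
	unsigned long pages_min = sample_min_free_kbytes >> (12 - 10); /* 1024 pages */
	unsigned long zone_present = 262144, lowmem_pages = 262144;
	unsigned long tmp = pages_min * zone_present / lowmem_pages;   /* 1024 */
	unsigned long wm_min = tmp;                /* pages_min  = 1024 */
	unsigned long wm_low = wm_min + tmp / 4;   /* pages_low  = 1280 */
	unsigned long wm_high = wm_min + tmp / 2;  /* pages_high = 1536 */

	(void)wm_low;
	(void)wm_high;
}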
  3520. /*
  3521. * Initialise min_free_kbytes.
  3522. *
  3523. * For small machines we want it small (128k min). For large machines
  3524. * we want it large (64MB max). But it is not linear, because network
  3525. * bandwidth does not increase linearly with machine size. We use
  3526. *
  3527. * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
  3528. * min_free_kbytes = sqrt(lowmem_kbytes * 16)
  3529. *
  3530. * which yields
  3531. *
  3532. * 16MB: 512k
  3533. * 32MB: 724k
  3534. * 64MB: 1024k
  3535. * 128MB: 1448k
  3536. * 256MB: 2048k
  3537. * 512MB: 2896k
  3538. * 1024MB: 4096k
  3539. * 2048MB: 5792k
  3540. * 4096MB: 8192k
  3541. * 8192MB: 11584k
  3542. * 16384MB: 16384k
  3543. */
  3544. static int __init init_per_zone_pages_min(void)
  3545. {
  3546. unsigned long lowmem_kbytes;
  3547. lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
  3548. min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
  3549. if (min_free_kbytes < 128)
  3550. min_free_kbytes = 128;
  3551. if (min_free_kbytes > 65536)
  3552. min_free_kbytes = 65536;
  3553. setup_per_zone_pages_min();
  3554. setup_per_zone_lowmem_reserve();
  3555. return 0;
  3556. }
  3557. module_init(init_per_zone_pages_min)
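/*
 * Illustrative sketch (not part of the original file): the sizing formula
 * above checked against the 1024MB row of the table, using a naive integer
 * square root in place of the kernel's int_sqrt().
 * example_min_free_kbytes(1024 * 1024) == 4096.
 */
static unsigned long example_min_free_kbytes(unsigned long lowmem_kbytes)
{
	unsigned long long target = (unsigned long long)lowmem_kbytes * 16;
	unsigned long root = 0;

	while ((unsigned long long)(root + 1) * (root + 1) <= target)
		root++;
	if (root < 128)
		root = 128;
	if (root > 65536)
		root = 65536;
	return root;
}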
  3558. /*
  3559. * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
  3560. * that we can call two helper functions whenever min_free_kbytes
  3561. * changes.
  3562. */
  3563. int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
  3564. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  3565. {
  3566. proc_dointvec(table, write, file, buffer, length, ppos);
  3567. if (write)
  3568. setup_per_zone_pages_min();
  3569. return 0;
  3570. }
  3571. #ifdef CONFIG_NUMA
  3572. int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
  3573. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  3574. {
  3575. struct zone *zone;
  3576. int rc;
  3577. rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
  3578. if (rc)
  3579. return rc;
  3580. for_each_zone(zone)
  3581. zone->min_unmapped_pages = (zone->present_pages *
  3582. sysctl_min_unmapped_ratio) / 100;
  3583. return 0;
  3584. }
  3585. int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
  3586. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  3587. {
  3588. struct zone *zone;
  3589. int rc;
  3590. rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
  3591. if (rc)
  3592. return rc;
  3593. for_each_zone(zone)
  3594. zone->min_slab_pages = (zone->present_pages *
  3595. sysctl_min_slab_ratio) / 100;
  3596. return 0;
  3597. }
  3598. #endif
  3599. /*
  3600. * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
  3601. * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
  3602. * whenever sysctl_lowmem_reserve_ratio changes.
  3603. *
  3604. * The reserve ratio obviously has absolutely no relation with the
3605. * pages_min watermarks. The lowmem reserve ratio only makes sense
3606. * in relation to the boot-time zone sizes.
  3607. */
  3608. int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
  3609. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  3610. {
  3611. proc_dointvec_minmax(table, write, file, buffer, length, ppos);
  3612. setup_per_zone_lowmem_reserve();
  3613. return 0;
  3614. }
  3615. /*
  3616. * percpu_pagelist_fraction - changes the pcp->high for each zone on each
3617. * cpu. It is the fraction of total pages in each zone that a hot per-cpu pagelist
3618. * can have before it gets flushed back to the buddy allocator.
  3619. */
  3620. int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
  3621. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  3622. {
  3623. struct zone *zone;
  3624. unsigned int cpu;
  3625. int ret;
  3626. ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
  3627. if (!write || (ret == -EINVAL))
  3628. return ret;
  3629. for_each_zone(zone) {
  3630. for_each_online_cpu(cpu) {
  3631. unsigned long high;
  3632. high = zone->present_pages / percpu_pagelist_fraction;
  3633. setup_pagelist_highmark(zone_pcp(zone, cpu), high);
  3634. }
  3635. }
  3636. return 0;
  3637. }
  3638. int hashdist = HASHDIST_DEFAULT;
  3639. #ifdef CONFIG_NUMA
  3640. static int __init set_hashdist(char *str)
  3641. {
  3642. if (!str)
  3643. return 0;
  3644. hashdist = simple_strtoul(str, &str, 0);
  3645. return 1;
  3646. }
  3647. __setup("hashdist=", set_hashdist);
  3648. #endif
  3649. /*
  3650. * allocate a large system hash table from bootmem
  3651. * - it is assumed that the hash table must contain an exact power-of-2
  3652. * quantity of entries
  3653. * - limit is the number of hash buckets, not the total allocation size
  3654. */
  3655. void *__init alloc_large_system_hash(const char *tablename,
  3656. unsigned long bucketsize,
  3657. unsigned long numentries,
  3658. int scale,
  3659. int flags,
  3660. unsigned int *_hash_shift,
  3661. unsigned int *_hash_mask,
  3662. unsigned long limit)
  3663. {
  3664. unsigned long long max = limit;
  3665. unsigned long log2qty, size;
  3666. void *table = NULL;
  3667. /* allow the kernel cmdline to have a say */
  3668. if (!numentries) {
  3669. /* round applicable memory size up to nearest megabyte */
  3670. numentries = nr_kernel_pages;
  3671. numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
  3672. numentries >>= 20 - PAGE_SHIFT;
  3673. numentries <<= 20 - PAGE_SHIFT;
  3674. /* limit to 1 bucket per 2^scale bytes of low memory */
  3675. if (scale > PAGE_SHIFT)
  3676. numentries >>= (scale - PAGE_SHIFT);
  3677. else
  3678. numentries <<= (PAGE_SHIFT - scale);
  3679. /* Make sure we've got at least a 0-order allocation.. */
  3680. if (unlikely((numentries * bucketsize) < PAGE_SIZE))
  3681. numentries = PAGE_SIZE / bucketsize;
  3682. }
  3683. numentries = roundup_pow_of_two(numentries);
  3684. /* limit allocation size to 1/16 total memory by default */
  3685. if (max == 0) {
  3686. max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
  3687. do_div(max, bucketsize);
  3688. }
  3689. if (numentries > max)
  3690. numentries = max;
  3691. log2qty = ilog2(numentries);
  3692. do {
  3693. size = bucketsize << log2qty;
  3694. if (flags & HASH_EARLY)
  3695. table = alloc_bootmem(size);
  3696. else if (hashdist)
  3697. table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
  3698. else {
  3699. unsigned long order;
  3700. for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
  3701. ;
  3702. table = (void*) __get_free_pages(GFP_ATOMIC, order);
  3703. /*
  3704. * If bucketsize is not a power-of-two, we may free
3705. * some pages at the end of the hash table.
  3706. */
  3707. if (table) {
  3708. unsigned long alloc_end = (unsigned long)table +
  3709. (PAGE_SIZE << order);
  3710. unsigned long used = (unsigned long)table +
  3711. PAGE_ALIGN(size);
  3712. split_page(virt_to_page(table), order);
  3713. while (used < alloc_end) {
  3714. free_page(used);
  3715. used += PAGE_SIZE;
  3716. }
  3717. }
  3718. }
  3719. } while (!table && size > PAGE_SIZE && --log2qty);
  3720. if (!table)
  3721. panic("Failed to allocate %s hash table\n", tablename);
  3722. printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
  3723. tablename,
  3724. (1U << log2qty),
  3725. ilog2(size) - PAGE_SHIFT,
  3726. size);
  3727. if (_hash_shift)
  3728. *_hash_shift = log2qty;
  3729. if (_hash_mask)
  3730. *_hash_mask = (1 << log2qty) - 1;
  3731. return table;
  3732. }
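/*
 * Illustrative sketch (not part of the original file): how a boot-time caller
 * might size a hash table with the helper above. The table name, scale and
 * variables here are made up; real users such as the inode and dentry caches
 * follow the same pattern.
 */
static unsigned int example_hash_shift;
static unsigned int example_hash_mask;

static void __init example_hash_init(void)
{
	struct hlist_head *example_table;

	example_table = alloc_large_system_hash("example-cache",
					sizeof(struct hlist_head),
					0,          /* size from memory unless set */
					14,         /* one bucket per 16KiB of lowmem */
					HASH_EARLY, /* allocate from bootmem */
					&example_hash_shift,
					&example_hash_mask,
					0);         /* no explicit bucket limit */
	(void)example_table;
}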
  3733. #ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
  3734. struct page *pfn_to_page(unsigned long pfn)
  3735. {
  3736. return __pfn_to_page(pfn);
  3737. }
  3738. unsigned long page_to_pfn(struct page *page)
  3739. {
  3740. return __page_to_pfn(page);
  3741. }
  3742. EXPORT_SYMBOL(pfn_to_page);
  3743. EXPORT_SYMBOL(page_to_pfn);
  3744. #endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
  3745. /* Return a pointer to the bitmap storing bits affecting a block of pages */
  3746. static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
  3747. unsigned long pfn)
  3748. {
  3749. #ifdef CONFIG_SPARSEMEM
  3750. return __pfn_to_section(pfn)->pageblock_flags;
  3751. #else
  3752. return zone->pageblock_flags;
  3753. #endif /* CONFIG_SPARSEMEM */
  3754. }
  3755. static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
  3756. {
  3757. #ifdef CONFIG_SPARSEMEM
  3758. pfn &= (PAGES_PER_SECTION-1);
  3759. return (pfn >> (MAX_ORDER-1)) * NR_PAGEBLOCK_BITS;
  3760. #else
  3761. pfn = pfn - zone->zone_start_pfn;
  3762. return (pfn >> (MAX_ORDER-1)) * NR_PAGEBLOCK_BITS;
  3763. #endif /* CONFIG_SPARSEMEM */
  3764. }
  3765. /**
  3766. * get_pageblock_flags_group - Return the requested group of flags for the MAX_ORDER_NR_PAGES block of pages
  3767. * @page: The page within the block of interest
  3768. * @start_bitidx: The first bit of interest to retrieve
  3769. * @end_bitidx: The last bit of interest
  3770. * returns pageblock_bits flags
  3771. */
  3772. unsigned long get_pageblock_flags_group(struct page *page,
  3773. int start_bitidx, int end_bitidx)
  3774. {
  3775. struct zone *zone;
  3776. unsigned long *bitmap;
  3777. unsigned long pfn, bitidx;
  3778. unsigned long flags = 0;
  3779. unsigned long value = 1;
  3780. zone = page_zone(page);
  3781. pfn = page_to_pfn(page);
  3782. bitmap = get_pageblock_bitmap(zone, pfn);
  3783. bitidx = pfn_to_bitidx(zone, pfn);
  3784. for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
  3785. if (test_bit(bitidx + start_bitidx, bitmap))
  3786. flags |= value;
  3787. return flags;
  3788. }
  3789. /**
  3790. * set_pageblock_flags_group - Set the requested group of flags for a MAX_ORDER_NR_PAGES block of pages
  3791. * @page: The page within the block of interest
  3792. * @start_bitidx: The first bit of interest
  3793. * @end_bitidx: The last bit of interest
  3794. * @flags: The flags to set
  3795. */
  3796. void set_pageblock_flags_group(struct page *page, unsigned long flags,
  3797. int start_bitidx, int end_bitidx)
  3798. {
  3799. struct zone *zone;
  3800. unsigned long *bitmap;
  3801. unsigned long pfn, bitidx;
  3802. unsigned long value = 1;
  3803. zone = page_zone(page);
  3804. pfn = page_to_pfn(page);
  3805. bitmap = get_pageblock_bitmap(zone, pfn);
  3806. bitidx = pfn_to_bitidx(zone, pfn);
  3807. for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
  3808. if (flags & value)
  3809. __set_bit(bitidx + start_bitidx, bitmap);
  3810. else
  3811. __clear_bit(bitidx + start_bitidx, bitmap);
  3812. }
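/*
 * Illustrative sketch (not part of the original file): reading and writing a
 * small group of pageblock bits with the helpers above. The bit positions and
 * the 3-bit "state" are made up for illustration; in this kernel the real
 * consumer of these helpers is the per-pageblock migratetype used by the
 * anti-fragmentation code.
 */
static void example_pageblock_state(struct page *page)
{
	unsigned long state;

	/* Read bits 0..2 of the pageblock containing @page */
	state = get_pageblock_flags_group(page, 0, 2);

	/* Store a new 3-bit value (here: 1) for the same pageblock */
	set_pageblock_flags_group(page, 1, 0, 2);

	(void)state;
}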