/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void __free_pages_ok(struct page *page, unsigned int order);
/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
	256,
#ifdef CONFIG_ZONE_DMA32
	256,
#endif
#ifdef CONFIG_HIGHMEM
	32
#endif
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
	"DMA",
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem"
#endif
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __initdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes.
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS];
  int __initdata nr_nodemap_entries;
  unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
  unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif
	if (zone != page_zone(page))
		return 0;

	return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	page[1].lru.prev = (void *)order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageCompound(p);
		set_page_private(p, (unsigned long)page);
	}
}
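
/*
 * Illustrative sketch (not part of the original file): after
 * prep_compound_page(page, 2) on an order-2 block, the four struct
 * pages look like this:
 *
 *	page[0]: PG_compound set, private = page        (the head page)
 *	page[1]: PG_compound set, private = page, lru.prev = (void *)2
 *	page[2]: PG_compound set, private = page
 *	page[3]: PG_compound set, private = page
 *
 * free_compound_page() above recovers the order from page[1].lru.prev,
 * and destroy_compound_page() below verifies exactly this layout.
 */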
static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely((unsigned long)page[1].lru.prev != order))
		bad_page(page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageCompound(p) |
				(page_private(p) != (unsigned long)page)))
			bad_page(page);
		__ClearPageCompound(p);
	}
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}
/*
 * Functions for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}
/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *	B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8 its order
 * 1 buddy is #10:
 *	B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *	P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
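
/*
 * Worked example (illustrative, not part of the original file): freeing
 * page #8 at order 1. Its buddy index is 8 ^ (1 << 1) = 10, so
 * __page_find_buddy() returns page + 2. If that buddy is free at the
 * same order, the pair merges, and __find_combined_index() gives
 * 8 & ~(1 << 1) = 8: the merged order-2 block starts at index #8.
 * Repeating at order 2, the buddy is 8 ^ 4 = 12 and the combined
 * index is 8 & ~4 = 8 again.
 */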
/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(buddy)))
		return 0;
#endif
	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PG_buddy. A page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */
static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}
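
/*
 * Illustrative trace (not part of the original file): freeing an order-0
 * page at index #8 when #9 is free at order 0 and #10-#11 form a free
 * order-1 block. Pass 1 merges #8 with buddy #9 into an order-1 block
 * at #8; pass 2 merges that block with the order-1 block at #10 into an
 * order-2 block at #8, which is then placed on free_area[2].free_list.
 */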
static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}
/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
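
/*
 * Illustrative trace (not part of the original file): satisfying an
 * order-0 request from an order-3 block starting at page #0. expand()
 * returns the upper halves to the free lists as it walks down:
 *
 *	pages #4-#7 -> free_area[2]  (order-2 remainder)
 *	pages #2-#3 -> free_area[1]  (order-1 remainder)
 *	page  #1    -> free_area[0]  (order-0 remainder)
 *
 * leaving page #0 to be handed back to the caller.
 */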
/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area * area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		expand(zone, page, order, current_order, area);
		return page;
	}

	return NULL;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order);
		if (unlikely(page == NULL))
			break;
		list_add_tail(&page->lru, list);
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the slab reaper to drain pagesets on a particular node that
 * belongs to the currently executing processor.
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_node_pages(int nodeid)
{
	int i;
	enum zone_type z;
	unsigned long flags;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
		struct per_cpu_pageset *pset;

		if (!populated_zone(zone))
			continue;

		pset = zone_pcp(zone, smp_processor_id());
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			if (pcp->count) {
				int to_drain;

				local_irq_save(flags);
				if (pcp->count >= pcp->batch)
					to_drain = pcp->batch;
				else
					to_drain = pcp->count;
				free_pages_bulk(zone, to_drain, &pcp->list, 0);
				pcp->count -= to_drain;
				local_irq_restore(flags);
			}
		}
	}
}
#endif

static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		if (!populated_zone(zone))
			continue;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}

#ifdef CONFIG_PM
void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!PageNosave(page))
				ClearPageNosaveFree(page);
		}

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				SetPageNosaveFree(pfn_to_page(pfn + i));
		}

	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);
}
#endif /* CONFIG_PM */
/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
	arch_free_page(page, 0);
	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	__count_vm_event(PGFREE);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}

void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1 << order) sub-pages: page[0] .. page[n-1].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
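
/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2); // order-2, no __GFP_COMP
 *	if (page) {
 *		split_page(page, 2);
 *		// page[0]..page[3] now each hold their own reference
 *		// and can be freed one at a time, e.g.:
 *		__free_page(page + 3);
 *	}
 */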
/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp[cold];
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count = rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
			if (unlikely(!pcp->count))
				goto failed;
		}
		page = list_entry(pcp->list.next, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(zonelist, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
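
/*
 * Illustrative combinations (not part of the original file), matching
 * how __alloc_pages() below builds alloc_flags:
 *
 *	GFP_KERNEL fast path:  ALLOC_WMARK_LOW | ALLOC_CPUSET
 *	GFP_KERNEL slow path:  ALLOC_WMARK_MIN | ALLOC_CPUSET
 *	GFP_ATOMIC (!wait, __GFP_HIGH):
 *		ALLOC_WMARK_MIN | ALLOC_HARDER | ALLOC_HIGH
 *	PF_MEMALLOC / TIF_MEMDIE retry: ALLOC_NO_WATERMARKS
 */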
#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct dentry *ignore_gfp_highmem_file;
	struct dentry *ignore_gfp_wait_file;
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (gfp_mask & __GFP_NOFAIL)
		return 0;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return 0;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return 0;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
			"fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				      &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				      &fail_page_alloc.ignore_gfp_highmem);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
			!fail_page_alloc.ignore_gfp_highmem_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */
/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
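
/*
 * Worked example (illustrative, not part of the original file): an
 * order-1 GFP_ATOMIC allocation against mark = 128 with no lowmem
 * reserve. ALLOC_HIGH halves min to 64 and ALLOC_HARDER drops it to 48.
 * With 200 free pages, free_pages = 200 - 2 + 1 = 199 > 48, so the base
 * check passes. At o = 0, pages sitting on the order-0 free list are
 * subtracted (they cannot back an order-1 block) and min is halved to
 * 24; if, say, 180 of the 199 were order-0 singletons, then
 * 199 - 180 = 19 <= 24 and the watermark check fails.
 */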
#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * task's mems_allowed, or node_online_map.)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	nodemask_t *allowednodes;	/* zonelist_cache approximation */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return NULL;

	if (jiffies - zlc->last_full_zap > 1 * HZ) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}

	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
					&cpuset_current_mems_allowed :
					&node_online_map;
	return allowednodes;
}
/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 * 1) Check that the zone isn't thought to be full (doesn't have its
 *    bit set in the zonelist_cache fullzones BITMAP).
 * 2) Check that the zone's node (obtained from the zonelist_cache
 *    z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even though it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->zones;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	i = z - zonelist->zones;

	set_bit(i, zlc->fullzones);
}

#else	/* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
				nodemask_t *allowednodes)
{
	return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
{
}
#endif	/* CONFIG_NUMA */

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, int alloc_flags)
{
	struct zone **z;
	struct page *page = NULL;
	int classzone_idx = zone_idx(zonelist->zones[0]);
	struct zone *zone;
	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */

zonelist_scan:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	z = zonelist->zones;

	do {
		if (NUMA_BUILD && zlc_active &&
			!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;
		zone = *z;
		if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
			zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
				break;
		if ((alloc_flags & ALLOC_CPUSET) &&
			!cpuset_zone_allowed_softwall(zone, gfp_mask))
				goto try_next_zone;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			if (alloc_flags & ALLOC_WMARK_MIN)
				mark = zone->pages_min;
			else if (alloc_flags & ALLOC_WMARK_LOW)
				mark = zone->pages_low;
			else
				mark = zone->pages_high;
			if (!zone_watermark_ok(zone, order, mark,
				    classzone_idx, alloc_flags)) {
				if (!zone_reclaim_mode ||
				    !zone_reclaim(zone, gfp_mask, order))
					goto this_zone_full;
			}
		}

		page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
		if (page)
			break;
this_zone_full:
		if (NUMA_BUILD)
			zlc_mark_zone_full(zonelist, z);
try_next_zone:
		if (NUMA_BUILD && !did_zlc_setup) {
			/* we do zlc_setup after the first zone is tried */
			allowednodes = zlc_setup(zonelist, alloc_flags);
			zlc_active = 1;
			did_zlc_setup = 1;
		}
	} while (*(++z) != NULL);

	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
		/* Disable zlc cache for second zonelist scan */
		zlc_active = 0;
		goto zonelist_scan;
	}
	return page;
}
/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page * fastcall
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct zone **z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
	int did_some_progress;

	might_sleep_if(wait);

	if (should_fail_alloc_page(gfp_mask, order))
		return NULL;

restart:
	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(*z == NULL)) {
		/* Should this ever happen?? */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
	if (page)
		goto got_pg;
	/*
	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
	 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
	 * using a larger set of nodes after it has established that the
	 * allowed per node queues are empty and that nodes are
	 * over allocated.
	 */
	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
		goto nopage;
	for (z = zonelist->zones; *z; z++)
		wakeup_kswapd(*z, order);

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	if (wait)
		alloc_flags |= ALLOC_CPUSET;

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */

rebalance:
	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, order,
				zonelist, ALLOC_NO_WATERMARKS);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);

	p->reclaim_state = NULL;
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, order,
						zonelist, alloc_flags);
		if (page)
			goto got_pg;
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
		if (page)
			goto got_pg;

		out_of_memory(zonelist, gfp_mask, order);
		goto restart;
	}

	/*
	 * Don't let big-order allocations loop unless the caller explicitly
	 * requests that.  Wait for some write requests to complete then retry.
	 *
	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
	 * <= 3, but that may not be true in other implementations.
	 */
	do_retry = 0;
	if (!(gfp_mask & __GFP_NORETRY)) {
		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
			do_retry = 1;
		if (gfp_mask & __GFP_NOFAIL)
			do_retry = 1;
	}
	if (do_retry) {
		congestion_wait(WRITE, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
got_pg:
	return page;
}
  1195. EXPORT_SYMBOL(__alloc_pages);

/*
 * Common helper functions.
 */
fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page * page;

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}

EXPORT_SYMBOL(__get_free_pages);

fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	struct page * page;

	/*
	 * get_zeroed_page() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
	if (page)
		return (unsigned long) page_address(page);
	return 0;
}

EXPORT_SYMBOL(get_zeroed_page);
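
/*
 * Usage sketch (illustrative addition, not from the original file): a
 * caller needing one zeroed lowmem page might do
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_page(addr);
 */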

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0)
		free_hot_cold_page(pvec->pages[i], pvec->cold);
}

fastcall void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

fastcall void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);

/*
 * Total amount of free (allocatable) RAM:
 */
unsigned int nr_free_pages(void)
{
	unsigned int sum = 0;
	struct zone *zone;

	for_each_zone(zone)
		sum += zone->free_pages;

	return sum;
}

EXPORT_SYMBOL(nr_free_pages);

#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
	unsigned int sum = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		sum += pgdat->node_zones[i].free_pages;

	return sum;
}
#endif

static unsigned int nr_free_zone_pages(int offset)
{
	/* Just pick one node, since fallback list is circular */
	pg_data_t *pgdat = NODE_DATA(numa_node_id());
	unsigned int sum = 0;

	struct zonelist *zonelist = pgdat->node_zonelists + offset;
	struct zone **zonep = zonelist->zones;
	struct zone *zone;

	for (zone = *zonep++; zone; zone = *zonep++) {
		unsigned long size = zone->present_pages;
		unsigned long high = zone->pages_high;
		if (size > high)
			sum += size - high;
	}

	return sum;
}
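
/*
 * Worked example (illustrative, numbers invented): for a zonelist
 * containing a NORMAL zone with present_pages = 100000, pages_high = 1000
 * and a DMA zone with present_pages = 4000, pages_high = 100, the sum is
 * (100000 - 1000) + (4000 - 100) = 102900 pages.
 */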

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}

/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
}

static inline void show_node(struct zone *zone)
{
	if (NUMA_BUILD)
		printk("Node %d ", zone_to_nid(zone));
}

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	val->totalram = pgdat->node_present_pages;
	val->freeram = nr_free_pages_pgdat(pgdat);
#ifdef CONFIG_HIGHMEM
	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
#else
	val->totalhigh = 0;
	val->freehigh = 0;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
void show_free_areas(void)
{
	int cpu;
	unsigned long active;
	unsigned long inactive;
	unsigned long free;
	struct zone *zone;

	for_each_zone(zone) {
		if (!populated_zone(zone))
			continue;

		show_node(zone);
		printk("%s per-cpu:\n", zone->name);

		for_each_online_cpu(cpu) {
			struct per_cpu_pageset *pageset;

			pageset = zone_pcp(zone, cpu);

			printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d   "
			       "Cold: hi:%5d, btch:%4d usd:%4d\n",
			       cpu, pageset->pcp[0].high,
			       pageset->pcp[0].batch, pageset->pcp[0].count,
			       pageset->pcp[1].high, pageset->pcp[1].batch,
			       pageset->pcp[1].count);
		}
	}

	get_zone_counts(&active, &inactive, &free);

	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
		" free:%u slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
		active,
		inactive,
		global_page_state(NR_FILE_DIRTY),
		global_page_state(NR_WRITEBACK),
		global_page_state(NR_UNSTABLE_NFS),
		nr_free_pages(),
		global_page_state(NR_SLAB_RECLAIMABLE) +
			global_page_state(NR_SLAB_UNRECLAIMABLE),
		global_page_state(NR_FILE_MAPPED),
		global_page_state(NR_PAGETABLE),
		global_page_state(NR_BOUNCE));

	for_each_zone(zone) {
		int i;

		if (!populated_zone(zone))
			continue;

		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active:%lukB"
			" inactive:%lukB"
			" present:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone->free_pages),
			K(zone->pages_min),
			K(zone->pages_low),
			K(zone->pages_high),
			K(zone->nr_active),
			K(zone->nr_inactive),
			K(zone->present_pages),
			zone->pages_scanned,
			(zone->all_unreclaimable ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_zone(zone) {
		unsigned long nr[MAX_ORDER], flags, order, total = 0;

		if (!populated_zone(zone))
			continue;

		show_node(zone);
		printk("%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			nr[order] = zone->free_area[order].nr_free;
			total += nr[order] << order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++)
			printk("%lu*%lukB ", nr[order], K(1UL) << order);
		printk("= %lukB\n", K(total));
	}

	show_swap_cache_info();
}

/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
static int __meminit build_zonelists_node(pg_data_t *pgdat,
		struct zonelist *zonelist, int nr_zones, enum zone_type zone_type)
{
	struct zone *zone;

	BUG_ON(zone_type >= MAX_NR_ZONES);
	zone_type++;

	do {
		zone_type--;
		zone = pgdat->node_zones + zone_type;
		if (populated_zone(zone)) {
			zonelist->zones[nr_zones++] = zone;
			check_highest_zone(zone_type);
		}

	} while (zone_type);
	return nr_zones;
}
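
/*
 * Illustrative note (added): because the loop above walks zone_type
 * downwards, the zonelist built for a __GFP_HIGHMEM allocation on a node
 * with all three classic zones populated comes out ordered HIGHMEM,
 * NORMAL, DMA, so each allocation falls back to ever more precious zones.
 */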

#ifdef CONFIG_NUMA
#define MAX_NODE_LOAD (num_online_nodes())
static int __meminitdata node_load[MAX_NUMNODES];
/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list. The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 * It returns -1 if no node is found.
 */
static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
		node_set(node, *used_node_mask);
		return node;
	}

	for_each_online_node(n) {
		cpumask_t tmp;

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Penalize nodes under us ("prefer the next node") */
		val += (n < node);

		/* Give preference to headless and unused nodes */
		tmp = node_to_cpumask(n);
		if (!cpus_empty(tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}
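
/*
 * Worked example (illustrative, values invented, PENALTY_FOR_NODE_WITH_CPUS
 * assumed to be 1): picking a successor for node 0 among candidates 1 and 2,
 * where node_distance(0, 1) = node_distance(0, 2) = 20 but only node 1 has
 * CPUs. Node 1 scores (20 + 1) * MAX_NODE_LOAD*MAX_NUMNODES + node_load[1],
 * node 2 scores 20 * MAX_NODE_LOAD*MAX_NUMNODES + node_load[2], so the
 * headless node 2 wins; the multiply keeps distance dominant and node_load[]
 * only breaks ties within the same raw score.
 */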

static void __meminit build_zonelists(pg_data_t *pgdat)
{
	int j, node, local_node;
	enum zone_type i;
	int prev_node, load;
	struct zonelist *zonelist;
	nodemask_t used_mask;

	/* initialize zonelists */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		zonelist = pgdat->node_zonelists + i;
		zonelist->zones[0] = NULL;
	}

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = num_online_nodes();
	prev_node = local_node;
	nodes_clear(used_mask);
	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		int distance = node_distance(local_node, node);

		/*
		 * If another node is sufficiently far away then it is better
		 * to reclaim pages in a zone before going off node.
		 */
		if (distance > RECLAIM_DISTANCE)
			zone_reclaim_mode = 1;

		/*
		 * We don't want to pressure a particular node, so add a
		 * penalty to the first node in the same distance group to
		 * make the selection round-robin.
		 */
		if (distance != node_distance(local_node, prev_node))
			node_load[node] += load;
		prev_node = node;
		load--;
		for (i = 0; i < MAX_NR_ZONES; i++) {
			zonelist = pgdat->node_zonelists + i;
			for (j = 0; zonelist->zones[j] != NULL; j++);
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
			zonelist->zones[j] = NULL;
		}
	}
}

/* Construct the zonelist performance cache - see further mmzone.h */
static void __meminit build_zonelist_cache(pg_data_t *pgdat)
{
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zonelist *zonelist;
		struct zonelist_cache *zlc;
		struct zone **z;

		zonelist = pgdat->node_zonelists + i;
		zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		for (z = zonelist->zones; *z; z++)
			zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
	}
}

#else	/* CONFIG_NUMA */

static void __meminit build_zonelists(pg_data_t *pgdat)
{
	int node, local_node;
	enum zone_type i,j;

	local_node = pgdat->node_id;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zonelist *zonelist;

		zonelist = pgdat->node_zonelists + i;

		j = build_zonelists_node(pgdat, zonelist, 0, i);
		/*
		 * Now we build the zonelist so that it contains the zones
		 * of all the other nodes.
		 * We don't want to pressure a particular node, so when
		 * building the zones for node N, we make sure that the
		 * zones coming right after the local ones are those from
		 * node N+1 (modulo N)
		 */
		for (node = local_node + 1; node < MAX_NUMNODES; node++) {
			if (!node_online(node))
				continue;
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
		}
		for (node = 0; node < local_node; node++) {
			if (!node_online(node))
				continue;
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
		}

		zonelist->zones[j] = NULL;
	}
}

/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
static void __meminit build_zonelist_cache(pg_data_t *pgdat)
{
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		pgdat->node_zonelists[i].zlcache_ptr = NULL;
}

#endif	/* CONFIG_NUMA */

/* The return type is int only to match what stop_machine_run() expects */
static int __meminit __build_all_zonelists(void *dummy)
{
	int nid;

	for_each_online_node(nid) {
		build_zonelists(NODE_DATA(nid));
		build_zonelist_cache(NODE_DATA(nid));
	}
	return 0;
}

void __meminit build_all_zonelists(void)
{
	if (system_state == SYSTEM_BOOTING) {
		__build_all_zonelists(NULL);
		cpuset_init_current_mems_allowed();
	} else {
		/* we have to stop all cpus to guarantee there is no user
		   of zonelist */
		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
		/* cpuset refresh routine should be here */
	}
	vm_total_pages = nr_free_pagecache_pages();
	printk("Built %i zonelists.  Total pages: %ld\n",
			num_online_nodes(), vm_total_pages);
}

/*
 * Helper functions to size the waitqueue hash table.
 * Essentially these want to choose hash table sizes sufficiently
 * large so that collisions trying to wait on pages are rare.
 * But in fact, the number of active page waitqueues on typical
 * systems is ridiculously low, less than 200. So this is even
 * conservative, even though it seems large.
 *
 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 * waitqueues, i.e. the size of the waitq table given the number of pages.
 */
#define PAGES_PER_WAITQUEUE	256

#ifndef CONFIG_MEMORY_HOTPLUG
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;

	while (size < pages)
		size <<= 1;

	/*
	 * Once we have dozens or even hundreds of threads sleeping
	 * on IO we've got bigger problems than wait queue collision.
	 * Limit the size of the wait table to a reasonable size.
	 */
	size = min(size, 4096UL);

	return max(size, 4UL);
}
#else
/*
 * A zone's size might be changed by hot-add, so it is not possible to determine
 * a suitable size for its wait_table.  So we use the maximum size now.
 *
 * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
 *
 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
 *
 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
 * or more by the traditional way. (See above).  It equals:
 *
 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
 *    powerpc (64K page size)             : =  (32G +16M)byte.
 */
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	return 4096UL;
}
#endif

/*
 * This is an integer logarithm so that shifts can be used later
 * to extract the more random high bits from the multiplicative
 * hash function before the remainder is taken.
 */
static inline unsigned long wait_table_bits(unsigned long size)
{
	return ffz(~size);
}
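
/*
 * Worked example (illustrative): a 128MB zone with 4K pages spans 32768
 * pages, so 32768 / PAGES_PER_WAITQUEUE = 128 and the table gets 128
 * entries (already a power of two). wait_table_bits(128) = ffz(~128) = 7,
 * i.e. the integer log2 of the table size.
 */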

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/*
 * Initially all pages are reserved - free ones are freed
 * up by free_all_bootmem() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 */
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, enum memmap_context context)
{
	struct page *page;
	unsigned long end_pfn = start_pfn + size;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/*
		 * There can be holes in boot-time mem_map[]s
		 * handed to this function.  They do not
		 * exist on hotplugged memory.
		 */
		if (context == MEMMAP_EARLY) {
			if (!early_pfn_valid(pfn))
				continue;
			if (!early_pfn_in_nid(pfn, nid))
				continue;
		}
		page = pfn_to_page(pfn);
		set_page_links(page, zone, nid, pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
		if (!is_highmem_idx(zone))
			set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
	}
}

void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
				unsigned long size)
{
	int order;
	for (order = 0; order < MAX_ORDER ; order++) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list);
		zone->free_area[order].nr_free = 0;
	}
}

#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
#endif

static int __cpuinit zone_batchsize(struct zone *zone)
{
	int batch;

	/*
	 * The per-cpu-pages pools are set to around 1/1000th of the
	 * size of the zone.  But no more than 1/2 of a meg.
	 *
	 * OK, so we don't know how big the cache is.  So guess.
	 */
	batch = zone->present_pages / 1024;
	if (batch * PAGE_SIZE > 512 * 1024)
		batch = (512 * 1024) / PAGE_SIZE;
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
	batch = (1 << (fls(batch + batch/2)-1)) - 1;

	return batch;
}
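
/*
 * Worked example (illustrative): a 1GB zone with 4K pages has
 * present_pages = 262144, so batch starts at 256; 256 pages exceed the
 * 512K cap, reducing batch to 128; dividing by 4 gives 32; and the
 * final 2^n - 1 clamp computes (1 << (fls(48) - 1)) - 1 = 31.
 */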

inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
	struct per_cpu_pages *pcp;

	memset(p, 0, sizeof(*p));

	pcp = &p->pcp[0];		/* hot */
	pcp->count = 0;
	pcp->high = 6 * batch;
	pcp->batch = max(1UL, 1 * batch);
	INIT_LIST_HEAD(&pcp->list);

	pcp = &p->pcp[1];		/* cold*/
	pcp->count = 0;
	pcp->high = 2 * batch;
	pcp->batch = max(1UL, batch/2);
	INIT_LIST_HEAD(&pcp->list);
}

/*
 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
 * to the value high for the pageset p.
 */
static void setup_pagelist_highmark(struct per_cpu_pageset *p,
				unsigned long high)
{
	struct per_cpu_pages *pcp;

	pcp = &p->pcp[0]; /* hot list */
	pcp->high = high;
	pcp->batch = max(1UL, high/4);
	if ((high/4) > (PAGE_SHIFT * 8))
		pcp->batch = PAGE_SHIFT * 8;
}
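
/*
 * Worked example (illustrative): with the batch of 31 computed above,
 * setup_pageset() gives the hot list high = 186 and batch = 31, and the
 * cold list high = 62 and batch = 15. If percpu_pagelist_fraction is set,
 * setup_pagelist_highmark() overrides the hot list: e.g. high = 1000
 * yields batch = min(250, PAGE_SHIFT * 8) = 96 on a 4K-page system.
 */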

#ifdef CONFIG_NUMA
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * Some NUMA counter updates may also be caught by the boot pagesets.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static struct per_cpu_pageset boot_pageset[NR_CPUS];

/*
 * Dynamically allocate memory for the
 * per cpu pageset array in struct zone.
 */
static int __cpuinit process_zones(int cpu)
{
	struct zone *zone, *dzone;

	for_each_zone(zone) {

		if (!populated_zone(zone))
			continue;

		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
					 GFP_KERNEL, cpu_to_node(cpu));
		if (!zone_pcp(zone, cpu))
			goto bad;

		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));

		if (percpu_pagelist_fraction)
			setup_pagelist_highmark(zone_pcp(zone, cpu),
				(zone->present_pages / percpu_pagelist_fraction));
	}

	return 0;
bad:
	for_each_zone(dzone) {
		if (dzone == zone)
			break;
		kfree(zone_pcp(dzone, cpu));
		zone_pcp(dzone, cpu) = NULL;
	}
	return -ENOMEM;
}

static inline void free_zone_pagesets(int cpu)
{
	struct zone *zone;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);

		/* Free per_cpu_pageset if it is slab allocated */
		if (pset != &boot_pageset[cpu])
			kfree(pset);
		zone_pcp(zone, cpu) = NULL;
	}
}

static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	int cpu = (long)hcpu;
	int ret = NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:
		if (process_zones(cpu))
			ret = NOTIFY_BAD;
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		free_zone_pagesets(cpu);
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block __cpuinitdata pageset_notifier =
	{ &pageset_cpuup_callback, NULL, 0 };

void __init setup_per_cpu_pageset(void)
{
	int err;

	/* Initialize per_cpu_pageset for cpu 0.
	 * A cpuup callback will do this for every cpu
	 * as it comes online
	 */
	err = process_zones(smp_processor_id());
	BUG_ON(err);
	register_cpu_notifier(&pageset_notifier);
}

#endif

static __meminit
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
	int i;
	struct pglist_data *pgdat = zone->zone_pgdat;
	size_t alloc_size;

	/*
	 * The per-page waitqueue mechanism uses hashed waitqueues
	 * per zone.
	 */
	zone->wait_table_hash_nr_entries =
		wait_table_hash_nr_entries(zone_size_pages);
	zone->wait_table_bits =
		wait_table_bits(zone->wait_table_hash_nr_entries);
	alloc_size = zone->wait_table_hash_nr_entries
					* sizeof(wait_queue_head_t);

	if (system_state == SYSTEM_BOOTING) {
		zone->wait_table = (wait_queue_head_t *)
			alloc_bootmem_node(pgdat, alloc_size);
	} else {
		/*
		 * This case means that a zone whose size was 0 gets new memory
		 * via memory hot-add.
		 * But it may be the case that a new node was hot-added.  In
		 * this case vmalloc() will not be able to use this new node's
		 * memory - this wait_table must be initialized to use this new
		 * node itself as well.
		 * To use this new node's memory, further consideration will be
		 * necessary.
		 */
		zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size);
	}
	if (!zone->wait_table)
		return -ENOMEM;

	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
		init_waitqueue_head(zone->wait_table + i);

	return 0;
}

static __meminit void zone_pcp_init(struct zone *zone)
{
	int cpu;
	unsigned long batch = zone_batchsize(zone);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
#ifdef CONFIG_NUMA
		/* Early boot. Slab allocator not functional yet */
		zone_pcp(zone, cpu) = &boot_pageset[cpu];
		setup_pageset(&boot_pageset[cpu],0);
#else
		setup_pageset(zone_pcp(zone,cpu), batch);
#endif
	}
	if (zone->present_pages)
		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
			zone->name, zone->present_pages, batch);
}

__meminit int init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size,
					enum memmap_context context)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret;
	ret = zone_wait_table_init(zone, size);
	if (ret)
		return ret;
	pgdat->nr_zones = zone_idx(zone) + 1;

	zone->zone_start_pfn = zone_start_pfn;

	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);

	zone_init_free_lists(pgdat, zone, zone->spanned_pages);

	return 0;
}

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * Basic iterator support. Return the first range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns first region regardless of node
 */
static int __init first_active_region_index_in_nid(int nid)
{
	int i;

	for (i = 0; i < nr_nodemap_entries; i++)
		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
			return i;

	return -1;
}

/*
 * Basic iterator support. Return the next active range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns next region regardless of node
 */
static int __init next_active_region_index_in_nid(int index, int nid)
{
	for (index = index + 1; index < nr_nodemap_entries; index++)
		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
			return index;

	return -1;
}

#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 * Architectures may implement their own version but if add_active_range()
 * was used and there are no special requirements, this is a convenient
 * alternative
 */
int __init early_pfn_to_nid(unsigned long pfn)
{
	int i;

	for (i = 0; i < nr_nodemap_entries; i++) {
		unsigned long start_pfn = early_node_map[i].start_pfn;
		unsigned long end_pfn = early_node_map[i].end_pfn;

		if (start_pfn <= pfn && pfn < end_pfn)
			return early_node_map[i].nid;
	}

	return 0;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */

/* Basic iterator support to walk early_node_map[] */
#define for_each_active_range_index_in_nid(i, nid) \
	for (i = first_active_region_index_in_nid(nid); i != -1; \
				i = next_active_region_index_in_nid(i, nid))

/**
 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling free_bootmem() manually.
 */
void __init free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn)
{
	int i;

	for_each_active_range_index_in_nid(i, nid) {
		unsigned long size_pages = 0;
		unsigned long end_pfn = early_node_map[i].end_pfn;

		if (early_node_map[i].start_pfn >= max_low_pfn)
			continue;

		if (end_pfn > max_low_pfn)
			end_pfn = max_low_pfn;

		size_pages = end_pfn - early_node_map[i].start_pfn;
		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
				PFN_PHYS(early_node_map[i].start_pfn),
				size_pages << PAGE_SHIFT);
	}
}

/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 *
 * If an architecture guarantees that all ranges registered with
 * add_active_ranges() contain no holes and may be freed, this
 * function may be used instead of calling memory_present() manually.
 */
void __init sparse_memory_present_with_active_regions(int nid)
{
	int i;

	for_each_active_range_index_in_nid(i, nid)
		memory_present(early_node_map[i].nid,
				early_node_map[i].start_pfn,
				early_node_map[i].end_pfn);
}

/**
 * push_node_boundaries - Push node boundaries to at least the requested boundary
 * @nid: The nid of the node to push the boundary for
 * @start_pfn: The start pfn of the node
 * @end_pfn: The end pfn of the node
 *
 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd
 * time. Specifically, on x86_64, SRAT will report ranges that can potentially
 * be hotplugged even though no physical memory exists. This function allows
 * an arch to push out the node boundaries so mem_map is allocated that can
 * be used later.
 */
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
void __init push_node_boundaries(unsigned int nid,
		unsigned long start_pfn, unsigned long end_pfn)
{
	printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
			nid, start_pfn, end_pfn);

	/* Initialise the boundary for this node if necessary */
	if (node_boundary_end_pfn[nid] == 0)
		node_boundary_start_pfn[nid] = -1UL;

	/* Update the boundaries */
	if (node_boundary_start_pfn[nid] > start_pfn)
		node_boundary_start_pfn[nid] = start_pfn;
	if (node_boundary_end_pfn[nid] < end_pfn)
		node_boundary_end_pfn[nid] = end_pfn;
}

/* If necessary, push the node boundary out for reserve hotadd */
static void __init account_node_boundary(unsigned int nid,
		unsigned long *start_pfn, unsigned long *end_pfn)
{
	printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
			nid, *start_pfn, *end_pfn);

	/* Return if boundary information has not been provided */
	if (node_boundary_end_pfn[nid] == 0)
		return;

	/* Check the boundaries and update if necessary */
	if (node_boundary_start_pfn[nid] < *start_pfn)
		*start_pfn = node_boundary_start_pfn[nid];
	if (node_boundary_end_pfn[nid] > *end_pfn)
		*end_pfn = node_boundary_end_pfn[nid];
}
#else
void __init push_node_boundaries(unsigned int nid,
		unsigned long start_pfn, unsigned long end_pfn) {}

static void __init account_node_boundary(unsigned int nid,
		unsigned long *start_pfn, unsigned long *end_pfn) {}
#endif

/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by an arch calling add_active_range(). If called for a node
 * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
 */
void __init get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	int i;
	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_active_range_index_in_nid(i, nid) {
		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
	}

	if (*start_pfn == -1UL) {
		printk(KERN_WARNING "Node %u active with no memory\n", nid);
		*start_pfn = 0;
	}

	/* Push the node boundaries out if requested */
	account_node_boundary(nid, start_pfn, end_pfn);
}

/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
unsigned long __init zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *ignored)
{
	unsigned long node_start_pfn, node_end_pfn;
	unsigned long zone_start_pfn, zone_end_pfn;

	/* Get the start and end of the node and zone */
	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];

	/* Check that this node has pages within the zone's required range */
	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
	zone_start_pfn = max(zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return zone_end_pfn - zone_start_pfn;
}

/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __init __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	int i = 0;
	unsigned long prev_end_pfn = 0, hole_pages = 0;
	unsigned long start_pfn;

	/* Find the end_pfn of the first active range of pfns in the node */
	i = first_active_region_index_in_nid(nid);
	if (i == -1)
		return 0;

	/* Account for ranges before physical memory on this node */
	if (early_node_map[i].start_pfn > range_start_pfn)
		hole_pages = early_node_map[i].start_pfn - range_start_pfn;
	prev_end_pfn = early_node_map[i].start_pfn;

	/* Find all holes for the zone within the node */
	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {

		/* No need to continue if prev_end_pfn is outside the zone */
		if (prev_end_pfn >= range_end_pfn)
			break;

		/* Make sure the end of the zone is not within the hole */
		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
		prev_end_pfn = max(prev_end_pfn, range_start_pfn);

		/* Update the hole size count and move on */
		if (start_pfn > range_start_pfn) {
			BUG_ON(prev_end_pfn > start_pfn);
			hole_pages += start_pfn - prev_end_pfn;
		}
		prev_end_pfn = early_node_map[i].end_pfn;
	}

	/* Account for ranges past physical memory on this node */
	if (range_end_pfn > prev_end_pfn)
		hole_pages += range_end_pfn -
				max(range_start_pfn, prev_end_pfn);

	return hole_pages;
}
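
/*
 * Worked example (illustrative, values invented): with active ranges
 * [0, 100) and [200, 300) registered for a node, calling
 * __absent_pages_in_range(nid, 0, 300) charges no leading hole, adds
 * 200 - 100 = 100 pages for the gap between the two ranges, finds no
 * trailing hole, and returns 100.
 */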

/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
unsigned long __init zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *ignored)
{
	unsigned long node_start_pfn, node_end_pfn;
	unsigned long zone_start_pfn, zone_end_pfn;

	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
			node_start_pfn);
	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
			node_end_pfn);

	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}

#else
static inline unsigned long zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long *zones_size)
{
	return zones_size[zone_type];
}

static inline unsigned long zone_absent_pages_in_node(int nid,
						unsigned long zone_type,
						unsigned long *zholes_size)
{
	if (!zholes_size)
		return 0;

	return zholes_size[zone_type];
}

#endif

static void __init calculate_node_totalpages(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	unsigned long realtotalpages, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
								zones_size);
	pgdat->node_spanned_pages = totalpages;

	realtotalpages = totalpages;
	for (i = 0; i < MAX_NR_ZONES; i++)
		realtotalpages -=
			zone_absent_pages_in_node(pgdat->node_id, i,
								zholes_size);
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
							realtotalpages);
}

/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
static void __meminit free_area_init_core(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	enum zone_type j;
	int nid = pgdat->node_id;
	unsigned long zone_start_pfn = pgdat->node_start_pfn;
	int ret;

	pgdat_resize_init(pgdat);
	pgdat->nr_zones = 0;
	init_waitqueue_head(&pgdat->kswapd_wait);
	pgdat->kswapd_max_order = 0;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, realsize, memmap_pages;

		size = zone_spanned_pages_in_node(nid, j, zones_size);
		realsize = size - zone_absent_pages_in_node(nid, j,
								zholes_size);

		/*
		 * Adjust realsize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
		memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
		if (realsize >= memmap_pages) {
			realsize -= memmap_pages;
			printk(KERN_DEBUG
				"  %s zone: %lu pages used for memmap\n",
				zone_names[j], memmap_pages);
		} else
			printk(KERN_WARNING
				"  %s zone: %lu pages exceeds realsize %lu\n",
				zone_names[j], memmap_pages, realsize);

		/* Account for reserved DMA pages */
		if (j == ZONE_DMA && realsize > dma_reserve) {
			realsize -= dma_reserve;
			printk(KERN_DEBUG "  DMA zone: %lu pages reserved\n",
					dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += realsize;
		nr_all_pages += realsize;

		zone->spanned_pages = size;
		zone->present_pages = realsize;
#ifdef CONFIG_NUMA
		zone->node = nid;
		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
						/ 100;
		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
#endif
		zone->name = zone_names[j];
		spin_lock_init(&zone->lock);
		spin_lock_init(&zone->lru_lock);
		zone_seqlock_init(zone);
		zone->zone_pgdat = pgdat;
		zone->free_pages = 0;

		zone->prev_priority = DEF_PRIORITY;

		zone_pcp_init(zone);
		INIT_LIST_HEAD(&zone->active_list);
		INIT_LIST_HEAD(&zone->inactive_list);
		zone->nr_scan_active = 0;
		zone->nr_scan_inactive = 0;
		zone->nr_active = 0;
		zone->nr_inactive = 0;
		zap_zone_vm_stats(zone);
		atomic_set(&zone->reclaim_in_progress, 0);
		if (!size)
			continue;

		ret = init_currently_empty_zone(zone, zone_start_pfn,
						size, MEMMAP_EARLY);
		BUG_ON(ret);
		zone_start_pfn += size;
	}
}

static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, start, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size = (end - start) * sizeof(struct page);
		map = alloc_remap(pgdat->node_id, size);
		if (!map)
			map = alloc_bootmem_node(pgdat, size);
		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
	}
#ifdef CONFIG_FLATMEM
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= pgdat->node_start_pfn;
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
	}
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}

void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long node_start_pfn,
		unsigned long *zholes_size)
{
	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	calculate_node_totalpages(pgdat, zones_size, zholes_size);

	alloc_node_mem_map(pgdat);

	free_area_init_core(pgdat, zones_size, zholes_size);
}

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/**
 * add_active_range - Register a range of PFNs backed by physical memory
 * @nid: The node ID the range resides on
 * @start_pfn: The start PFN of the available physical memory
 * @end_pfn: The end PFN of the available physical memory
 *
 * These ranges are stored in an early_node_map[] and later used by
 * free_area_init_nodes() to calculate zone sizes and holes. If the
 * range spans a memory hole, it is up to the architecture to ensure
 * the memory is not freed by the bootmem allocator. If possible
 * the range being registered will be merged with existing ranges.
 */
void __init add_active_range(unsigned int nid, unsigned long start_pfn,
						unsigned long end_pfn)
{
	int i;

	printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
			  "%d entries of %d used\n",
			  nid, start_pfn, end_pfn,
			  nr_nodemap_entries, MAX_ACTIVE_REGIONS);

	/* Merge with existing active regions if possible */
	for (i = 0; i < nr_nodemap_entries; i++) {
		if (early_node_map[i].nid != nid)
			continue;

		/* Skip if an existing region covers this new one */
		if (start_pfn >= early_node_map[i].start_pfn &&
				end_pfn <= early_node_map[i].end_pfn)
			return;

		/* Merge forward if suitable */
		if (start_pfn <= early_node_map[i].end_pfn &&
				end_pfn > early_node_map[i].end_pfn) {
			early_node_map[i].end_pfn = end_pfn;
			return;
		}

		/* Merge backward if suitable */
		if (start_pfn < early_node_map[i].end_pfn &&
				end_pfn >= early_node_map[i].start_pfn) {
			early_node_map[i].start_pfn = start_pfn;
			return;
		}
	}

	/* Check that early_node_map is large enough */
	if (i >= MAX_ACTIVE_REGIONS) {
		printk(KERN_CRIT "More than %d memory regions, truncating\n",
						MAX_ACTIVE_REGIONS);
		return;
	}

	early_node_map[i].nid = nid;
	early_node_map[i].start_pfn = start_pfn;
	early_node_map[i].end_pfn = end_pfn;
	nr_nodemap_entries = i + 1;
}

/**
 * shrink_active_range - Shrink an existing registered range of PFNs
 * @nid: The node id the range is on that should be shrunk
 * @old_end_pfn: The old end PFN of the range
 * @new_end_pfn: The new PFN of the range
 *
 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
 * The map is kept at the end physical page range that has already been
 * registered with add_active_range(). This function allows an arch to shrink
 * an existing registered range.
 */
void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
						unsigned long new_end_pfn)
{
	int i;

	/* Find the old active region end and shrink */
	for_each_active_range_index_in_nid(i, nid)
		if (early_node_map[i].end_pfn == old_end_pfn) {
			early_node_map[i].end_pfn = new_end_pfn;
			break;
		}
}

/**
 * remove_all_active_ranges - Remove all currently registered regions
 *
 * During discovery, it may be found that a table like SRAT is invalid
 * and an alternative discovery method must be used. This function removes
 * all currently registered regions.
 */
void __init remove_all_active_ranges(void)
{
	memset(early_node_map, 0, sizeof(early_node_map));
	nr_nodemap_entries = 0;
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
	memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
	memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
}

/* Compare two active node_active_regions */
static int __init cmp_node_active_region(const void *a, const void *b)
{
	struct node_active_region *arange = (struct node_active_region *)a;
	struct node_active_region *brange = (struct node_active_region *)b;

	/* Done this way to avoid overflows */
	if (arange->start_pfn > brange->start_pfn)
		return 1;
	if (arange->start_pfn < brange->start_pfn)
		return -1;

	return 0;
}

/* sort the node_map by start_pfn */
static void __init sort_node_map(void)
{
	sort(early_node_map, (size_t)nr_nodemap_entries,
			sizeof(struct node_active_region),
			cmp_node_active_region, NULL);
}

/* Find the lowest pfn for a node. This depends on a sorted early_node_map */
unsigned long __init find_min_pfn_for_node(unsigned long nid)
{
	int i;

	/* Regions in the early_node_map can be in any order */
	sort_node_map();

	/* Assuming a sorted map, the first range found has the starting pfn */
	for_each_active_range_index_in_nid(i, nid)
		return early_node_map[i].start_pfn;

	printk(KERN_WARNING "Could not find start_pfn for node %lu\n", nid);
	return 0;
}

/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * add_active_range().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
{
	return find_min_pfn_for_node(MAX_NUMNODES);
}

/**
 * find_max_pfn_with_active_regions - Find the maximum PFN registered
 *
 * It returns the maximum PFN based on information provided via
 * add_active_range().
 */
unsigned long __init find_max_pfn_with_active_regions(void)
{
	int i;
	unsigned long max_pfn = 0;

	for (i = 0; i < nr_nodemap_entries; i++)
		max_pfn = max(max_pfn, early_node_map[i].end_pfn);

	return max_pfn;
}

/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by add_active_range(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
	unsigned long nid;
	enum zone_type i;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));
	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
	for (i = 1; i < MAX_NR_ZONES; i++) {
		arch_zone_lowest_possible_pfn[i] =
			arch_zone_highest_possible_pfn[i-1];
		arch_zone_highest_possible_pfn[i] =
			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
	}

	/* Print out the zone ranges */
	printk("Zone PFN ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++)
		printk("  %-8s %8lu -> %8lu\n",
				zone_names[i],
				arch_zone_lowest_possible_pfn[i],
				arch_zone_highest_possible_pfn[i]);

	/* Print out the early_node_map[] */
	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
	for (i = 0; i < nr_nodemap_entries; i++)
		printk("  %3d: %8lu -> %8lu\n", early_node_map[i].nid,
				early_node_map[i].start_pfn,
				early_node_map[i].end_pfn);

	/* Initialise every node */
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		free_area_init_node(nid, pgdat, NULL,
				find_min_pfn_for_node(nid), NULL);
	}
}
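
/*
 * Illustrative arch-side usage (hedged sketch, constants invented): an
 * architecture populating the node map at boot might do something like
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES];
 *
 *	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 *	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 *	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 *	add_active_range(0, 0, max_low_pfn);
 *	free_area_init_nodes(max_zone_pfns);
 *
 * where MAX_DMA_PFN and max_low_pfn stand in for arch-specific limits.
 */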

#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
static bootmem_data_t contig_bootmem_data;
struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };

EXPORT_SYMBOL(contig_page_data);
#endif

void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, NODE_DATA(0), zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}

static int page_alloc_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD) {
		local_irq_disable();
		__drain_pages(cpu);
		vm_events_fold_cpu(cpu);
		local_irq_enable();
		refresh_cpu_vm_stats(cpu);
	}
	return NOTIFY_OK;
}

void __init page_alloc_init(void)
{
	hotcpu_notifier(page_alloc_cpu_notify, 0);
}

/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			unsigned long max = 0;

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat pages_high as reserved pages. */
			max += zone->pages_high;

			if (max > zone->present_pages)
				max = zone->present_pages;
			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
}

/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type j, idx;

	for_each_online_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long present_pages = zone->present_pages;

			zone->lowmem_reserve[j] = 0;

			idx = j;
			while (idx) {
				struct zone *lower_zone;

				idx--;

				if (sysctl_lowmem_reserve_ratio[idx] < 1)
					sysctl_lowmem_reserve_ratio[idx] = 1;

				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = present_pages /
					sysctl_lowmem_reserve_ratio[idx];
				present_pages += lower_zone->present_pages;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
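
/*
 * Worked example (illustrative, values invented): with
 * sysctl_lowmem_reserve_ratio[DMA] = 256 and [NORMAL] = 32, a node with
 * 200000 NORMAL pages and 100000 HIGHMEM pages ends up with
 * NORMAL->lowmem_reserve[HIGHMEM] = 100000 / 32 = 3125 and
 * DMA->lowmem_reserve[HIGHMEM] = (100000 + 200000) / 256 = 1171 pages,
 * i.e. allocations that could have used HIGHMEM must leave that much
 * headroom before eating into the lower zones.
 */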
/**
 * setup_per_zone_pages_min - called when min_free_kbytes changes.
 *
 * Ensures that the pages_{min,low,high} values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_pages_min(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->present_pages;
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lru_lock, flags);
		tmp = (u64)pages_min * zone->present_pages;
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The (pages_high - pages_low) and (pages_low -
			 * pages_min) deltas control asynchronous page reclaim,
			 * and so should not be capped for highmem.
			 */
			int min_pages;

			min_pages = zone->present_pages / 1024;
			if (min_pages < SWAP_CLUSTER_MAX)
				min_pages = SWAP_CLUSTER_MAX;
			if (min_pages > 128)
				min_pages = 128;
			zone->pages_min = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->pages_min = tmp;
		}

		zone->pages_low  = zone->pages_min + (tmp >> 2);
		zone->pages_high = zone->pages_min + (tmp >> 1);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
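/*
 * Worked example (hypothetical sizes): with min_free_kbytes = 4096 on a
 * 4KB-page machine, pages_min = 4096 >> 2 = 1024 pages in total. A lowmem
 * zone holding half of all lowmem gets tmp = 512, so:
 *
 *	pages_min  = 512
 *	pages_low  = 512 + 512/4 = 640
 *	pages_high = 512 + 512/2 = 768
 *
 * kswapd is woken once free pages drop below pages_low and stops
 * reclaiming once they climb back above pages_high.
 */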
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (64MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
static int __init init_per_zone_pages_min(void)
{
	unsigned long lowmem_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
	if (min_free_kbytes < 128)
		min_free_kbytes = 128;
	if (min_free_kbytes > 65536)
		min_free_kbytes = 65536;
	setup_per_zone_pages_min();
	setup_per_zone_lowmem_reserve();
	return 0;
}
module_init(init_per_zone_pages_min)
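/*
 * Spot-check of the table above: for 1024MB of lowmem, lowmem_kbytes =
 * 1048576, so min_free_kbytes = sqrt(1048576 * 16) = sqrt(16777216) =
 * 4096k, matching the 1024MB row. The 128k floor and 65536k ceiling
 * clamp the two ends of the curve.
 */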
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec()
 * so that we can rerun setup_per_zone_pages_min() whenever
 * min_free_kbytes changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	setup_per_zone_pages_min();
	return 0;
}
#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->present_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_slab_pages = (zone->present_pages *
				sysctl_min_slab_ratio) / 100;
	return 0;
}
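/*
 * Worked example (hypothetical zone size): with sysctl_min_unmapped_ratio
 * set to 1, a zone of 262144 pages gets a threshold of
 * 262144 * 1 / 100 = 2621 pages; min_slab_pages scales the same way from
 * sysctl_min_slab_ratio. Both values are consulted by zone reclaim.
 */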
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 * whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the pages_min watermarks. The
 * lowmem reserve ratio only makes sense in relation to the boot-time
 * zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}
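/*
 * Tuning example: writes to /proc/sys/vm/lowmem_reserve_ratio, e.g.
 *
 *	echo "256 32" > /proc/sys/vm/lowmem_reserve_ratio
 *
 * land here and recompute every zone's lowmem_reserve[] array. The
 * number of fields (shown here for illustration) depends on how many
 * zones the kernel was configured with.
 */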
/*
 * percpu_pagelist_fraction - changes pcp->high for each zone on each CPU.
 * It sets the fraction of a zone's total pages that a hot per-cpu
 * pagelist may hold before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	unsigned int cpu;
	int ret;

	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (!write || (ret == -EINVAL))
		return ret;
	for_each_zone(zone) {
		for_each_online_cpu(cpu) {
			unsigned long high;

			high = zone->present_pages / percpu_pagelist_fraction;
			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
		}
	}
	return 0;
}
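/*
 * Worked example (hypothetical zone size): with percpu_pagelist_fraction
 * set to 8 and a zone of 262144 pages, each online CPU's hot pagelist is
 * allowed to grow to 262144 / 8 = 32768 pages before pages are returned
 * to the buddy lists.
 */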
int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
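/*
 * Boot-time example: passing "hashdist=1" on the kernel command line
 * makes alloc_large_system_hash() below use __vmalloc() (so the table's
 * pages can be spread across NUMA nodes) for hashes not flagged
 * HASH_EARLY, instead of taking contiguous pages from one node.
 */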
#endif
/*
 * allocate a large system hash table from bootmem (or, for non-early
 * callers, from vmalloc or the page allocator)
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long limit)
{
	unsigned long long max = limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
		numentries >>= 20 - PAGE_SHIFT;
		numentries <<= 20 - PAGE_SHIFT;

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = alloc_bootmem(size);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			unsigned long order;

			for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
				;
			table = (void *) __get_free_pages(GFP_ATOMIC, order);
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
	       tablename,
	       (1U << log2qty),
	       ilog2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
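/*
 * Usage sketch, modelled on real callers such as the inode cache in
 * fs/inode.c (the names here follow that caller and are not defined in
 * this file):
 *
 *	inode_hashtable = alloc_large_system_hash("Inode-cache",
 *						  sizeof(struct hlist_head),
 *						  ihash_entries,
 *						  14,	// 1 bucket per 16KB lowmem
 *						  HASH_EARLY,
 *						  &i_hash_shift,
 *						  &i_hash_mask,
 *						  0);	// default cap: 1/16 of memory
 *
 * With 4KB pages, scale = 14 > PAGE_SHIFT shrinks the auto-sized entry
 * count by a factor of 4 before it is rounded up to a power of two.
 */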
#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
struct page *pfn_to_page(unsigned long pfn)
{
	return __pfn_to_page(pfn);
}
unsigned long page_to_pfn(struct page *page)
{
	return __page_to_pfn(page);
}
EXPORT_SYMBOL(pfn_to_page);
EXPORT_SYMBOL(page_to_pfn);
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
#if MAX_NUMNODES > 1
/*
 * Find the highest possible node id.
 */
int highest_possible_node_id(void)
{
	unsigned int node;
	unsigned int highest = 0;

	for_each_node_mask(node, node_possible_map)
		highest = node;
	return highest;
}
EXPORT_SYMBOL(highest_possible_node_id);
#endif