page_alloc.c

  1. /*
  2. * linux/mm/page_alloc.c
  3. *
  4. * Manages the free list; the system allocates free pages here.
  5. * Note that kmalloc() lives in slab.c
  6. *
  7. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  8. * Swap reorganised 29.12.95, Stephen Tweedie
  9. * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  10. * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  11. * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  12. * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  13. * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  14. * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  15. */
  16. #include <linux/config.h>
  17. #include <linux/stddef.h>
  18. #include <linux/mm.h>
  19. #include <linux/swap.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/pagemap.h>
  22. #include <linux/bootmem.h>
  23. #include <linux/compiler.h>
  24. #include <linux/kernel.h>
  25. #include <linux/module.h>
  26. #include <linux/suspend.h>
  27. #include <linux/pagevec.h>
  28. #include <linux/blkdev.h>
  29. #include <linux/slab.h>
  30. #include <linux/notifier.h>
  31. #include <linux/topology.h>
  32. #include <linux/sysctl.h>
  33. #include <linux/cpu.h>
  34. #include <linux/cpuset.h>
  35. #include <linux/memory_hotplug.h>
  36. #include <linux/nodemask.h>
  37. #include <linux/vmalloc.h>
  38. #include <linux/mempolicy.h>
  39. #include <asm/tlbflush.h>
  40. #include "internal.h"
  41. /*
  42. * MCD - HACK: Find somewhere to initialize this EARLY, or make this
  43. * initializer cleaner
  44. */
  45. nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
  46. EXPORT_SYMBOL(node_online_map);
  47. nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
  48. EXPORT_SYMBOL(node_possible_map);
  49. struct pglist_data *pgdat_list __read_mostly;
  50. unsigned long totalram_pages __read_mostly;
  51. unsigned long totalhigh_pages __read_mostly;
  52. long nr_swap_pages;
  53. int percpu_pagelist_fraction;
  54. static void fastcall free_hot_cold_page(struct page *page, int cold);
  55. /*
  56. * results with 256, 32 in the lowmem_reserve sysctl:
  57. * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
  58. * 1G machine -> (16M dma, 784M normal, 224M high)
  59. * NORMAL allocation will leave 784M/256 of ram reserved in ZONE_DMA
  60. * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
  61. * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
  62. *
  63. * TBD: should special case ZONE_DMA32 machines here - in those we normally
  64. * don't need any ZONE_NORMAL reservation
  65. */
  66. int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
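For concreteness, a worked reading of the numbers in the comment above (illustrative arithmetic only, not additional kernel code): with a reserve ratio of 256, a ZONE_NORMAL allocation on the 1G machine leaves roughly 784M/256 ≈ 3M of ZONE_DMA untouchable, and with a ratio of 32 a ZONE_HIGHMEM allocation leaves 224M/32 = 7M of ZONE_NORMAL in reserve; the smaller the ratio, the larger the protected reserve.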
  67. EXPORT_SYMBOL(totalram_pages);
  68. /*
  69. * Used by page_zone() to look up the address of the struct zone whose
  70. * id is encoded in the upper bits of page->flags
  71. */
  72. struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
  73. EXPORT_SYMBOL(zone_table);
  74. static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
  75. int min_free_kbytes = 1024;
  76. unsigned long __initdata nr_kernel_pages;
  77. unsigned long __initdata nr_all_pages;
  78. #ifdef CONFIG_DEBUG_VM
  79. static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
  80. {
  81. int ret = 0;
  82. unsigned seq;
  83. unsigned long pfn = page_to_pfn(page);
  84. do {
  85. seq = zone_span_seqbegin(zone);
  86. if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
  87. ret = 1;
  88. else if (pfn < zone->zone_start_pfn)
  89. ret = 1;
  90. } while (zone_span_seqretry(zone, seq));
  91. return ret;
  92. }
  93. static int page_is_consistent(struct zone *zone, struct page *page)
  94. {
  95. #ifdef CONFIG_HOLES_IN_ZONE
  96. if (!pfn_valid(page_to_pfn(page)))
  97. return 0;
  98. #endif
  99. if (zone != page_zone(page))
  100. return 0;
  101. return 1;
  102. }
  103. /*
  104. * Temporary debugging check for pages not lying within a given zone.
  105. */
  106. static int bad_range(struct zone *zone, struct page *page)
  107. {
  108. if (page_outside_zone_boundaries(zone, page))
  109. return 1;
  110. if (!page_is_consistent(zone, page))
  111. return 1;
  112. return 0;
  113. }
  114. #else
  115. static inline int bad_range(struct zone *zone, struct page *page)
  116. {
  117. return 0;
  118. }
  119. #endif
  120. static void bad_page(struct page *page)
  121. {
  122. printk(KERN_EMERG "Bad page state in process '%s'\n"
  123. KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
  124. KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
  125. KERN_EMERG "Backtrace:\n",
  126. current->comm, page, (int)(2*sizeof(unsigned long)),
  127. (unsigned long)page->flags, page->mapping,
  128. page_mapcount(page), page_count(page));
  129. dump_stack();
  130. page->flags &= ~(1 << PG_lru |
  131. 1 << PG_private |
  132. 1 << PG_locked |
  133. 1 << PG_active |
  134. 1 << PG_dirty |
  135. 1 << PG_reclaim |
  136. 1 << PG_slab |
  137. 1 << PG_swapcache |
  138. 1 << PG_writeback );
  139. set_page_count(page, 0);
  140. reset_page_mapcount(page);
  141. page->mapping = NULL;
  142. add_taint(TAINT_BAD_PAGE);
  143. }
  144. /*
  145. * Higher-order pages are called "compound pages". They are structured thusly:
  146. *
  147. * The first PAGE_SIZE page is called the "head page".
  148. *
  149. * The remaining PAGE_SIZE pages are called "tail pages".
  150. *
  151. * All pages have PG_compound set. All pages have their ->private pointing at
  152. * the head page (even the head page has this).
  153. *
  154. * The first tail page's ->mapping, if non-zero, holds the address of the
  155. * compound page's put_page() function.
  156. *
  157. * The order of the allocation is stored in the first tail page's ->index.
  158. * This is only for debug at present. This usage means that zero-order pages
  159. * may not be compound.
  160. */
  161. static void prep_compound_page(struct page *page, unsigned long order)
  162. {
  163. int i;
  164. int nr_pages = 1 << order;
  165. page[1].mapping = NULL;
  166. page[1].index = order;
  167. for (i = 0; i < nr_pages; i++) {
  168. struct page *p = page + i;
  169. SetPageCompound(p);
  170. set_page_private(p, (unsigned long)page);
  171. }
  172. }
  173. static void destroy_compound_page(struct page *page, unsigned long order)
  174. {
  175. int i;
  176. int nr_pages = 1 << order;
  177. if (unlikely(page[1].index != order))
  178. bad_page(page);
  179. for (i = 0; i < nr_pages; i++) {
  180. struct page *p = page + i;
  181. if (unlikely(!PageCompound(p) |
  182. (page_private(p) != (unsigned long)page)))
  183. bad_page(page);
  184. ClearPageCompound(p);
  185. }
  186. }
  187. /*
  188. * function for dealing with page's order in buddy system.
  189. * zone->lock is already acquired when we use these.
  190. * So, we don't need atomic page->flags operations here.
  191. */
  192. static inline unsigned long page_order(struct page *page) {
  193. return page_private(page);
  194. }
  195. static inline void set_page_order(struct page *page, int order) {
  196. set_page_private(page, order);
  197. __SetPagePrivate(page);
  198. }
  199. static inline void rmv_page_order(struct page *page)
  200. {
  201. __ClearPagePrivate(page);
  202. set_page_private(page, 0);
  203. }
  204. /*
  205. * Locate the struct page for both the matching buddy in our
  206. * pair (buddy1) and the combined O(n+1) page they form (page).
  207. *
  208. * 1) Any buddy B1 will have an order O twin B2 which satisfies
  209. * the following equation:
  210. * B2 = B1 ^ (1 << O)
  211. * For example, if the starting buddy (buddy1) is #8, its order-1
  212. * buddy is #10:
  213. * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
  214. *
  215. * 2) Any buddy B will have an order O+1 parent P which
  216. * satisfies the following equation:
  217. * P = B & ~(1 << O)
  218. *
  219. * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
  220. */
  221. static inline struct page *
  222. __page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
  223. {
  224. unsigned long buddy_idx = page_idx ^ (1 << order);
  225. return page + (buddy_idx - page_idx);
  226. }
  227. static inline unsigned long
  228. __find_combined_index(unsigned long page_idx, unsigned int order)
  229. {
  230. return (page_idx & ~(1 << order));
  231. }
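The two index relations in the comment above can be checked with a small standalone userspace sketch (illustrative only; none of the names below exist in the kernel):

#include <assert.h>

/* Demonstrates: buddy = idx ^ (1 << order), parent = idx & ~(1 << order) */
int main(void)
{
	unsigned long idx = 8, order = 1;
	unsigned long buddy = idx ^ (1UL << order);	/* 8 ^ 2 = 10 */
	unsigned long parent = idx & ~(1UL << order);	/* 8 & ~2 = 8 */

	assert(buddy == 10);
	assert(parent == 8);
	assert((buddy ^ (1UL << order)) == idx);	/* the relation is symmetric */
	return 0;
}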
  232. /*
  233. * This function checks whether a page is free && is the buddy
  234. * we can coalesce a page and its buddy if
  235. * (a) the buddy is not in a hole &&
  236. * (b) the buddy is free &&
  237. * (c) the buddy is on the buddy system &&
  238. * (d) a page and its buddy have the same order.
  239. * for recording page's order, we use page_private(page) and PG_private.
  240. *
  241. */
  242. static inline int page_is_buddy(struct page *page, int order)
  243. {
  244. #ifdef CONFIG_HOLES_IN_ZONE
  245. if (!pfn_valid(page_to_pfn(page)))
  246. return 0;
  247. #endif
  248. if (PagePrivate(page) &&
  249. (page_order(page) == order) &&
  250. page_count(page) == 0)
  251. return 1;
  252. return 0;
  253. }
  254. /*
  255. * Freeing function for a buddy system allocator.
  256. *
  257. * The concept of a buddy system is to maintain direct-mapped table
  258. * (containing bit values) for memory blocks of various "orders".
  259. * The bottom level table contains the map for the smallest allocatable
  260. * units of memory (here, pages), and each level above it describes
  261. * pairs of units from the levels below, hence, "buddies".
  262. * At a high level, all that happens here is marking the table entry
  263. * at the bottom level available, and propagating the changes upward
  264. * as necessary, plus some accounting needed to play nicely with other
  265. * parts of the VM system.
  266. * At each level, we keep a list of pages, which are heads of contiguous
  267. * free runs of (1 << order) pages and are marked with PG_private. A page's
  268. * order is recorded in the page_private(page) field.
  269. * So when we are allocating or freeing one, we can derive the state of the
  270. * other. That is, if we allocate a small block, and both were
  271. * free, the remainder of the region must be split into blocks.
  272. * If a block is freed, and its buddy is also free, then this
  273. * triggers coalescing into a block of larger size.
  274. *
  275. * -- wli
  276. */
  277. static inline void __free_one_page(struct page *page,
  278. struct zone *zone, unsigned int order)
  279. {
  280. unsigned long page_idx;
  281. int order_size = 1 << order;
  282. if (unlikely(PageCompound(page)))
  283. destroy_compound_page(page, order);
  284. page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
  285. BUG_ON(page_idx & (order_size - 1));
  286. BUG_ON(bad_range(zone, page));
  287. zone->free_pages += order_size;
  288. while (order < MAX_ORDER-1) {
  289. unsigned long combined_idx;
  290. struct free_area *area;
  291. struct page *buddy;
  292. buddy = __page_find_buddy(page, page_idx, order);
  293. if (!page_is_buddy(buddy, order))
  294. break; /* Move the buddy up one level. */
  295. list_del(&buddy->lru);
  296. area = zone->free_area + order;
  297. area->nr_free--;
  298. rmv_page_order(buddy);
  299. combined_idx = __find_combined_index(page_idx, order);
  300. page = page + (combined_idx - page_idx);
  301. page_idx = combined_idx;
  302. order++;
  303. }
  304. set_page_order(page, order);
  305. list_add(&page->lru, &zone->free_area[order].free_list);
  306. zone->free_area[order].nr_free++;
  307. }
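A short worked trace of the coalescing loop above (indices are illustrative): if the page at index 10 is freed at order 1 and its order-1 buddy at index 10 ^ 2 = 8 is free and on the order-1 list, the buddy is unlinked, the combined index becomes 10 & ~2 = 8, and the loop retries at order 2; if the order-2 buddy at 8 ^ 4 = 12 is not free, the merged block at index 8 is placed on the order-2 free list.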
  308. static inline int free_pages_check(struct page *page)
  309. {
  310. if (unlikely(page_mapcount(page) |
  311. (page->mapping != NULL) |
  312. (page_count(page) != 0) |
  313. (page->flags & (
  314. 1 << PG_lru |
  315. 1 << PG_private |
  316. 1 << PG_locked |
  317. 1 << PG_active |
  318. 1 << PG_reclaim |
  319. 1 << PG_slab |
  320. 1 << PG_swapcache |
  321. 1 << PG_writeback |
  322. 1 << PG_reserved ))))
  323. bad_page(page);
  324. if (PageDirty(page))
  325. __ClearPageDirty(page);
  326. /*
  327. * For now, we report if PG_reserved was found set, but do not
  328. * clear it, and do not free the page. But we shall soon need
  329. * to do more, for when the ZERO_PAGE count wraps negative.
  330. */
  331. return PageReserved(page);
  332. }
  333. /*
  334. * Frees a list of pages.
  335. * Assumes all pages on list are in same zone, and of same order.
  336. * count is the number of pages to free.
  337. *
  338. * If the zone was previously in an "all pages pinned" state then look to
  339. * see if this freeing clears that state.
  340. *
  341. * And clear the zone's pages_scanned counter, to hold off the "all pages are
  342. * pinned" detection logic.
  343. */
  344. static void free_pages_bulk(struct zone *zone, int count,
  345. struct list_head *list, int order)
  346. {
  347. spin_lock(&zone->lock);
  348. zone->all_unreclaimable = 0;
  349. zone->pages_scanned = 0;
  350. while (count--) {
  351. struct page *page;
  352. BUG_ON(list_empty(list));
  353. page = list_entry(list->prev, struct page, lru);
  354. /* have to delete it, as __free_one_page manipulates the list */
  355. list_del(&page->lru);
  356. __free_one_page(page, zone, order);
  357. }
  358. spin_unlock(&zone->lock);
  359. }
  360. static void free_one_page(struct zone *zone, struct page *page, int order)
  361. {
  362. LIST_HEAD(list);
  363. list_add(&page->lru, &list);
  364. free_pages_bulk(zone, 1, &list, order);
  365. }
  366. static void __free_pages_ok(struct page *page, unsigned int order)
  367. {
  368. unsigned long flags;
  369. int i;
  370. int reserved = 0;
  371. arch_free_page(page, order);
  372. if (!PageHighMem(page))
  373. mutex_debug_check_no_locks_freed(page_address(page),
  374. PAGE_SIZE<<order);
  375. #ifndef CONFIG_MMU
  376. for (i = 1 ; i < (1 << order) ; ++i)
  377. __put_page(page + i);
  378. #endif
  379. for (i = 0 ; i < (1 << order) ; ++i)
  380. reserved += free_pages_check(page + i);
  381. if (reserved)
  382. return;
  383. kernel_map_pages(page, 1 << order, 0);
  384. local_irq_save(flags);
  385. __mod_page_state(pgfree, 1 << order);
  386. free_one_page(page_zone(page), page, order);
  387. local_irq_restore(flags);
  388. }
  389. /*
  390. * permit the bootmem allocator to evade page validation on high-order frees
  391. */
  392. void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
  393. {
  394. if (order == 0) {
  395. __ClearPageReserved(page);
  396. set_page_count(page, 0);
  397. free_hot_cold_page(page, 0);
  398. } else {
  399. LIST_HEAD(list);
  400. int loop;
  401. for (loop = 0; loop < BITS_PER_LONG; loop++) {
  402. struct page *p = &page[loop];
  403. if (loop + 16 < BITS_PER_LONG)
  404. prefetchw(p + 16);
  405. __ClearPageReserved(p);
  406. set_page_count(p, 0);
  407. }
  408. arch_free_page(page, order);
  409. mod_page_state(pgfree, 1 << order);
  410. list_add(&page->lru, &list);
  411. kernel_map_pages(page, 1 << order, 0);
  412. free_pages_bulk(page_zone(page), 1, &list, order);
  413. }
  414. }
  415. /*
  416. * The order of subdivision here is critical for the IO subsystem.
  417. * Please do not alter this order without good reasons and regression
  418. * testing. Specifically, as large blocks of memory are subdivided,
  419. * the order in which smaller blocks are delivered depends on the order
  420. * they're subdivided in this function. This is the primary factor
  421. * influencing the order in which pages are delivered to the IO
  422. * subsystem according to empirical testing, and this is also justified
  423. * by considering the behavior of a buddy system containing a single
  424. * large block of memory acted on by a series of small allocations.
  425. * This behavior is a critical factor in sglist merging's success.
  426. *
  427. * -- wli
  428. */
  429. static inline void expand(struct zone *zone, struct page *page,
  430. int low, int high, struct free_area *area)
  431. {
  432. unsigned long size = 1 << high;
  433. while (high > low) {
  434. area--;
  435. high--;
  436. size >>= 1;
  437. BUG_ON(bad_range(zone, &page[size]));
  438. list_add(&page[size].lru, &area->free_list);
  439. area->nr_free++;
  440. set_page_order(&page[size], high);
  441. }
  442. }
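As a worked example of the splitting done by expand() (sizes are illustrative): if an order-0 page is requested and the smallest available block is order 3 (8 pages starting at page p), the loop peels off the upper halves one order at a time: pages p+4..p+7 go onto the order-2 free list, p+2..p+3 onto the order-1 list, and p+1 onto the order-0 list, leaving p itself to be returned to the caller.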
  443. /*
  444. * This page is about to be returned from the page allocator
  445. */
  446. static int prep_new_page(struct page *page, int order)
  447. {
  448. if (unlikely(page_mapcount(page) |
  449. (page->mapping != NULL) |
  450. (page_count(page) != 0) |
  451. (page->flags & (
  452. 1 << PG_lru |
  453. 1 << PG_private |
  454. 1 << PG_locked |
  455. 1 << PG_active |
  456. 1 << PG_dirty |
  457. 1 << PG_reclaim |
  458. 1 << PG_slab |
  459. 1 << PG_swapcache |
  460. 1 << PG_writeback |
  461. 1 << PG_reserved ))))
  462. bad_page(page);
  463. /*
  464. * For now, we report if PG_reserved was found set, but do not
  465. * clear it, and do not allocate the page: as a safety net.
  466. */
  467. if (PageReserved(page))
  468. return 1;
  469. page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
  470. 1 << PG_referenced | 1 << PG_arch_1 |
  471. 1 << PG_checked | 1 << PG_mappedtodisk);
  472. set_page_private(page, 0);
  473. set_page_refs(page, order);
  474. kernel_map_pages(page, 1 << order, 1);
  475. return 0;
  476. }
  477. /*
  478. * Do the hard work of removing an element from the buddy allocator.
  479. * Call me with the zone->lock already held.
  480. */
  481. static struct page *__rmqueue(struct zone *zone, unsigned int order)
  482. {
  483. struct free_area * area;
  484. unsigned int current_order;
  485. struct page *page;
  486. for (current_order = order; current_order < MAX_ORDER; ++current_order) {
  487. area = zone->free_area + current_order;
  488. if (list_empty(&area->free_list))
  489. continue;
  490. page = list_entry(area->free_list.next, struct page, lru);
  491. list_del(&page->lru);
  492. rmv_page_order(page);
  493. area->nr_free--;
  494. zone->free_pages -= 1UL << order;
  495. expand(zone, page, order, current_order, area);
  496. return page;
  497. }
  498. return NULL;
  499. }
  500. /*
  501. * Obtain a specified number of elements from the buddy allocator, all under
  502. * a single hold of the lock, for efficiency. Add them to the supplied list.
  503. * Returns the number of new pages which were placed at *list.
  504. */
  505. static int rmqueue_bulk(struct zone *zone, unsigned int order,
  506. unsigned long count, struct list_head *list)
  507. {
  508. int i;
  509. spin_lock(&zone->lock);
  510. for (i = 0; i < count; ++i) {
  511. struct page *page = __rmqueue(zone, order);
  512. if (unlikely(page == NULL))
  513. break;
  514. list_add_tail(&page->lru, list);
  515. }
  516. spin_unlock(&zone->lock);
  517. return i;
  518. }
  519. #ifdef CONFIG_NUMA
  520. /* Called from the slab reaper to drain remote pagesets */
  521. void drain_remote_pages(void)
  522. {
  523. struct zone *zone;
  524. int i;
  525. unsigned long flags;
  526. local_irq_save(flags);
  527. for_each_zone(zone) {
  528. struct per_cpu_pageset *pset;
  529. /* Do not drain local pagesets */
  530. if (zone->zone_pgdat->node_id == numa_node_id())
  531. continue;
  532. pset = zone_pcp(zone, smp_processor_id());
  533. for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
  534. struct per_cpu_pages *pcp;
  535. pcp = &pset->pcp[i];
  536. free_pages_bulk(zone, pcp->count, &pcp->list, 0);
  537. pcp->count = 0;
  538. }
  539. }
  540. local_irq_restore(flags);
  541. }
  542. #endif
  543. #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
  544. static void __drain_pages(unsigned int cpu)
  545. {
  546. unsigned long flags;
  547. struct zone *zone;
  548. int i;
  549. for_each_zone(zone) {
  550. struct per_cpu_pageset *pset;
  551. pset = zone_pcp(zone, cpu);
  552. for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
  553. struct per_cpu_pages *pcp;
  554. pcp = &pset->pcp[i];
  555. local_irq_save(flags);
  556. free_pages_bulk(zone, pcp->count, &pcp->list, 0);
  557. pcp->count = 0;
  558. local_irq_restore(flags);
  559. }
  560. }
  561. }
  562. #endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
  563. #ifdef CONFIG_PM
  564. void mark_free_pages(struct zone *zone)
  565. {
  566. unsigned long zone_pfn, flags;
  567. int order;
  568. struct list_head *curr;
  569. if (!zone->spanned_pages)
  570. return;
  571. spin_lock_irqsave(&zone->lock, flags);
  572. for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
  573. ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
  574. for (order = MAX_ORDER - 1; order >= 0; --order)
  575. list_for_each(curr, &zone->free_area[order].free_list) {
  576. unsigned long start_pfn, i;
  577. start_pfn = page_to_pfn(list_entry(curr, struct page, lru));
  578. for (i=0; i < (1<<order); i++)
  579. SetPageNosaveFree(pfn_to_page(start_pfn+i));
  580. }
  581. spin_unlock_irqrestore(&zone->lock, flags);
  582. }
  583. /*
  584. * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  585. */
  586. void drain_local_pages(void)
  587. {
  588. unsigned long flags;
  589. local_irq_save(flags);
  590. __drain_pages(smp_processor_id());
  591. local_irq_restore(flags);
  592. }
  593. #endif /* CONFIG_PM */
  594. static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
  595. {
  596. #ifdef CONFIG_NUMA
  597. pg_data_t *pg = z->zone_pgdat;
  598. pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
  599. struct per_cpu_pageset *p;
  600. p = zone_pcp(z, cpu);
  601. if (pg == orig) {
  602. p->numa_hit++;
  603. } else {
  604. p->numa_miss++;
  605. zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
  606. }
  607. if (pg == NODE_DATA(numa_node_id()))
  608. p->local_node++;
  609. else
  610. p->other_node++;
  611. #endif
  612. }
  613. /*
  614. * Free a 0-order page
  615. */
  616. static void fastcall free_hot_cold_page(struct page *page, int cold)
  617. {
  618. struct zone *zone = page_zone(page);
  619. struct per_cpu_pages *pcp;
  620. unsigned long flags;
  621. arch_free_page(page, 0);
  622. if (PageAnon(page))
  623. page->mapping = NULL;
  624. if (free_pages_check(page))
  625. return;
  626. kernel_map_pages(page, 1, 0);
  627. pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
  628. local_irq_save(flags);
  629. __inc_page_state(pgfree);
  630. list_add(&page->lru, &pcp->list);
  631. pcp->count++;
  632. if (pcp->count >= pcp->high) {
  633. free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
  634. pcp->count -= pcp->batch;
  635. }
  636. local_irq_restore(flags);
  637. put_cpu();
  638. }
  639. void fastcall free_hot_page(struct page *page)
  640. {
  641. free_hot_cold_page(page, 0);
  642. }
  643. void fastcall free_cold_page(struct page *page)
  644. {
  645. free_hot_cold_page(page, 1);
  646. }
  647. static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
  648. {
  649. int i;
  650. BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
  651. for(i = 0; i < (1 << order); i++)
  652. clear_highpage(page + i);
  653. }
  654. /*
  655. * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
  656. * we cheat by calling it from here, in the order > 0 path. Saves a branch
  657. * or two.
  658. */
  659. static struct page *buffered_rmqueue(struct zonelist *zonelist,
  660. struct zone *zone, int order, gfp_t gfp_flags)
  661. {
  662. unsigned long flags;
  663. struct page *page;
  664. int cold = !!(gfp_flags & __GFP_COLD);
  665. int cpu;
  666. again:
  667. cpu = get_cpu();
  668. if (likely(order == 0)) {
  669. struct per_cpu_pages *pcp;
  670. pcp = &zone_pcp(zone, cpu)->pcp[cold];
  671. local_irq_save(flags);
  672. if (!pcp->count) {
  673. pcp->count += rmqueue_bulk(zone, 0,
  674. pcp->batch, &pcp->list);
  675. if (unlikely(!pcp->count))
  676. goto failed;
  677. }
  678. page = list_entry(pcp->list.next, struct page, lru);
  679. list_del(&page->lru);
  680. pcp->count--;
  681. } else {
  682. spin_lock_irqsave(&zone->lock, flags);
  683. page = __rmqueue(zone, order);
  684. spin_unlock(&zone->lock);
  685. if (!page)
  686. goto failed;
  687. }
  688. __mod_page_state_zone(zone, pgalloc, 1 << order);
  689. zone_statistics(zonelist, zone, cpu);
  690. local_irq_restore(flags);
  691. put_cpu();
  692. BUG_ON(bad_range(zone, page));
  693. if (prep_new_page(page, order))
  694. goto again;
  695. if (gfp_flags & __GFP_ZERO)
  696. prep_zero_page(page, order, gfp_flags);
  697. if (order && (gfp_flags & __GFP_COMP))
  698. prep_compound_page(page, order);
  699. return page;
  700. failed:
  701. local_irq_restore(flags);
  702. put_cpu();
  703. return NULL;
  704. }
  705. #define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
  706. #define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */
  707. #define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */
  708. #define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */
  709. #define ALLOC_HARDER 0x10 /* try to alloc harder */
  710. #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
  711. #define ALLOC_CPUSET 0x40 /* check for correct cpuset */
  712. /*
  713. * Return 1 if free pages are above 'mark'. This takes into account the order
  714. * of the allocation.
  715. */
  716. int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
  717. int classzone_idx, int alloc_flags)
  718. {
  719. /* free_pages may go negative - that's OK */
  720. long min = mark, free_pages = z->free_pages - (1 << order) + 1;
  721. int o;
  722. if (alloc_flags & ALLOC_HIGH)
  723. min -= min / 2;
  724. if (alloc_flags & ALLOC_HARDER)
  725. min -= min / 4;
  726. if (free_pages <= min + z->lowmem_reserve[classzone_idx])
  727. return 0;
  728. for (o = 0; o < order; o++) {
  729. /* At the next order, this order's pages become unavailable */
  730. free_pages -= z->free_area[o].nr_free << o;
  731. /* Require fewer higher order pages to be free */
  732. min >>= 1;
  733. if (free_pages <= min)
  734. return 0;
  735. }
  736. return 1;
  737. }
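A worked pass through the check above (all numbers hypothetical): for an order-2 request with mark = 128, no ALLOC_HIGH or ALLOC_HARDER, lowmem_reserve = 0, z->free_pages = 1000, nr_free[0] = 300 and nr_free[1] = 100, the initial test compares 1000 - 4 + 1 = 997 against 128 and passes; at o = 0 the 300 order-0 pages are subtracted (997 - 300 = 697) and compared against min = 64; at o = 1 the 100 order-1 pages remove 200 more (697 - 200 = 497) against min = 32; every test passes, so the function returns 1.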
  738. /*
  739. * get_page_from_freelist goes through the zonelist trying to allocate
  740. * a page.
  741. */
  742. static struct page *
  743. get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
  744. struct zonelist *zonelist, int alloc_flags)
  745. {
  746. struct zone **z = zonelist->zones;
  747. struct page *page = NULL;
  748. int classzone_idx = zone_idx(*z);
  749. /*
  750. * Go through the zonelist once, looking for a zone with enough free.
  751. * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
  752. */
  753. do {
  754. if ((alloc_flags & ALLOC_CPUSET) &&
  755. !cpuset_zone_allowed(*z, gfp_mask))
  756. continue;
  757. if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
  758. unsigned long mark;
  759. if (alloc_flags & ALLOC_WMARK_MIN)
  760. mark = (*z)->pages_min;
  761. else if (alloc_flags & ALLOC_WMARK_LOW)
  762. mark = (*z)->pages_low;
  763. else
  764. mark = (*z)->pages_high;
  765. if (!zone_watermark_ok(*z, order, mark,
  766. classzone_idx, alloc_flags))
  767. if (!zone_reclaim_mode ||
  768. !zone_reclaim(*z, gfp_mask, order))
  769. continue;
  770. }
  771. page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
  772. if (page) {
  773. break;
  774. }
  775. } while (*(++z) != NULL);
  776. return page;
  777. }
  778. /*
  779. * This is the 'heart' of the zoned buddy allocator.
  780. */
  781. struct page * fastcall
  782. __alloc_pages(gfp_t gfp_mask, unsigned int order,
  783. struct zonelist *zonelist)
  784. {
  785. const gfp_t wait = gfp_mask & __GFP_WAIT;
  786. struct zone **z;
  787. struct page *page;
  788. struct reclaim_state reclaim_state;
  789. struct task_struct *p = current;
  790. int do_retry;
  791. int alloc_flags;
  792. int did_some_progress;
  793. might_sleep_if(wait);
  794. restart:
  795. z = zonelist->zones; /* the list of zones suitable for gfp_mask */
  796. if (unlikely(*z == NULL)) {
  797. /* Should this ever happen?? */
  798. return NULL;
  799. }
  800. page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
  801. zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
  802. if (page)
  803. goto got_pg;
  804. do {
  805. wakeup_kswapd(*z, order);
  806. } while (*(++z));
  807. /*
  808. * OK, we're below the kswapd watermark and have kicked background
  809. * reclaim. Now things get more complex, so set up alloc_flags according
  810. * to how we want to proceed.
  811. *
  812. * The caller may dip into page reserves a bit more if the caller
  813. * cannot run direct reclaim, or if the caller has realtime scheduling
  814. * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
  815. * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
  816. */
  817. alloc_flags = ALLOC_WMARK_MIN;
  818. if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
  819. alloc_flags |= ALLOC_HARDER;
  820. if (gfp_mask & __GFP_HIGH)
  821. alloc_flags |= ALLOC_HIGH;
  822. alloc_flags |= ALLOC_CPUSET;
  823. /*
  824. * Go through the zonelist again. Let __GFP_HIGH and allocations
  825. * coming from realtime tasks go deeper into reserves.
  826. *
  827. * This is the last chance, in general, before the goto nopage.
  828. * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
  829. * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
  830. */
  831. page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
  832. if (page)
  833. goto got_pg;
  834. /* This allocation should allow future memory freeing. */
  835. if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
  836. && !in_interrupt()) {
  837. if (!(gfp_mask & __GFP_NOMEMALLOC)) {
  838. nofail_alloc:
  839. /* go through the zonelist yet again, ignoring mins */
  840. page = get_page_from_freelist(gfp_mask, order,
  841. zonelist, ALLOC_NO_WATERMARKS);
  842. if (page)
  843. goto got_pg;
  844. if (gfp_mask & __GFP_NOFAIL) {
  845. blk_congestion_wait(WRITE, HZ/50);
  846. goto nofail_alloc;
  847. }
  848. }
  849. goto nopage;
  850. }
  851. /* Atomic allocations - we can't balance anything */
  852. if (!wait)
  853. goto nopage;
  854. rebalance:
  855. cond_resched();
  856. /* We now go into synchronous reclaim */
  857. cpuset_memory_pressure_bump();
  858. p->flags |= PF_MEMALLOC;
  859. reclaim_state.reclaimed_slab = 0;
  860. p->reclaim_state = &reclaim_state;
  861. did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
  862. p->reclaim_state = NULL;
  863. p->flags &= ~PF_MEMALLOC;
  864. cond_resched();
  865. if (likely(did_some_progress)) {
  866. page = get_page_from_freelist(gfp_mask, order,
  867. zonelist, alloc_flags);
  868. if (page)
  869. goto got_pg;
  870. } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
  871. /*
  872. * Go through the zonelist yet one more time, keep
  873. * very high watermark here, this is only to catch
  874. * a parallel oom killing, we must fail if we're still
  875. * under heavy pressure.
  876. */
  877. page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
  878. zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
  879. if (page)
  880. goto got_pg;
  881. out_of_memory(gfp_mask, order);
  882. goto restart;
  883. }
  884. /*
  885. * Don't let big-order allocations loop unless the caller explicitly
  886. * requests that. Wait for some write requests to complete then retry.
  887. *
  888. * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
  889. * <= 3, but that may not be true in other implementations.
  890. */
  891. do_retry = 0;
  892. if (!(gfp_mask & __GFP_NORETRY)) {
  893. if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
  894. do_retry = 1;
  895. if (gfp_mask & __GFP_NOFAIL)
  896. do_retry = 1;
  897. }
  898. if (do_retry) {
  899. blk_congestion_wait(WRITE, HZ/50);
  900. goto rebalance;
  901. }
  902. nopage:
  903. if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
  904. printk(KERN_WARNING "%s: page allocation failure."
  905. " order:%d, mode:0x%x\n",
  906. p->comm, order, gfp_mask);
  907. dump_stack();
  908. show_mem();
  909. }
  910. got_pg:
  911. return page;
  912. }
  913. EXPORT_SYMBOL(__alloc_pages);
  914. /*
  915. * Common helper functions.
  916. */
  917. fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
  918. {
  919. struct page * page;
  920. page = alloc_pages(gfp_mask, order);
  921. if (!page)
  922. return 0;
  923. return (unsigned long) page_address(page);
  924. }
  925. EXPORT_SYMBOL(__get_free_pages);
  926. fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
  927. {
  928. struct page * page;
  929. /*
  930. * get_zeroed_page() returns a directly mapped kernel address, which cannot
  931. * represent a highmem page
  932. */
  933. BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
  934. page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
  935. if (page)
  936. return (unsigned long) page_address(page);
  937. return 0;
  938. }
  939. EXPORT_SYMBOL(get_zeroed_page);
  940. void __pagevec_free(struct pagevec *pvec)
  941. {
  942. int i = pagevec_count(pvec);
  943. while (--i >= 0)
  944. free_hot_cold_page(pvec->pages[i], pvec->cold);
  945. }
  946. fastcall void __free_pages(struct page *page, unsigned int order)
  947. {
  948. if (put_page_testzero(page)) {
  949. if (order == 0)
  950. free_hot_page(page);
  951. else
  952. __free_pages_ok(page, order);
  953. }
  954. }
  955. EXPORT_SYMBOL(__free_pages);
  956. fastcall void free_pages(unsigned long addr, unsigned int order)
  957. {
  958. if (addr != 0) {
  959. BUG_ON(!virt_addr_valid((void *)addr));
  960. __free_pages(virt_to_page((void *)addr), order);
  961. }
  962. }
  963. EXPORT_SYMBOL(free_pages);
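A minimal usage sketch of the helpers above, from a hypothetical caller's point of view (error handling trimmed to the essentials; the function name is made up for illustration):

/* Allocate two physically contiguous pages (order 1), use them, free them. */
static int example_use_pages(void)
{
	unsigned long buf = __get_free_pages(GFP_KERNEL, 1);

	if (!buf)
		return -ENOMEM;

	memset((void *)buf, 0, 2 * PAGE_SIZE);
	/* ... use the buffer ... */

	free_pages(buf, 1);
	return 0;
}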
  964. /*
  965. * Total amount of free (allocatable) RAM:
  966. */
  967. unsigned int nr_free_pages(void)
  968. {
  969. unsigned int sum = 0;
  970. struct zone *zone;
  971. for_each_zone(zone)
  972. sum += zone->free_pages;
  973. return sum;
  974. }
  975. EXPORT_SYMBOL(nr_free_pages);
  976. #ifdef CONFIG_NUMA
  977. unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
  978. {
  979. unsigned int i, sum = 0;
  980. for (i = 0; i < MAX_NR_ZONES; i++)
  981. sum += pgdat->node_zones[i].free_pages;
  982. return sum;
  983. }
  984. #endif
  985. static unsigned int nr_free_zone_pages(int offset)
  986. {
  987. /* Just pick one node, since fallback list is circular */
  988. pg_data_t *pgdat = NODE_DATA(numa_node_id());
  989. unsigned int sum = 0;
  990. struct zonelist *zonelist = pgdat->node_zonelists + offset;
  991. struct zone **zonep = zonelist->zones;
  992. struct zone *zone;
  993. for (zone = *zonep++; zone; zone = *zonep++) {
  994. unsigned long size = zone->present_pages;
  995. unsigned long high = zone->pages_high;
  996. if (size > high)
  997. sum += size - high;
  998. }
  999. return sum;
  1000. }
  1001. /*
  1002. * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
  1003. */
  1004. unsigned int nr_free_buffer_pages(void)
  1005. {
  1006. return nr_free_zone_pages(gfp_zone(GFP_USER));
  1007. }
  1008. /*
  1009. * Amount of free RAM allocatable within all zones
  1010. */
  1011. unsigned int nr_free_pagecache_pages(void)
  1012. {
  1013. return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
  1014. }
  1015. #ifdef CONFIG_HIGHMEM
  1016. unsigned int nr_free_highpages (void)
  1017. {
  1018. pg_data_t *pgdat;
  1019. unsigned int pages = 0;
  1020. for_each_pgdat(pgdat)
  1021. pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
  1022. return pages;
  1023. }
  1024. #endif
  1025. #ifdef CONFIG_NUMA
  1026. static void show_node(struct zone *zone)
  1027. {
  1028. printk("Node %d ", zone->zone_pgdat->node_id);
  1029. }
  1030. #else
  1031. #define show_node(zone) do { } while (0)
  1032. #endif
  1033. /*
  1034. * Accumulate the page_state information across all CPUs.
  1035. * The result is unavoidably approximate - it can change
  1036. * during and after execution of this function.
  1037. */
  1038. static DEFINE_PER_CPU(struct page_state, page_states) = {0};
  1039. atomic_t nr_pagecache = ATOMIC_INIT(0);
  1040. EXPORT_SYMBOL(nr_pagecache);
  1041. #ifdef CONFIG_SMP
  1042. DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
  1043. #endif
  1044. static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
  1045. {
  1046. int cpu = 0;
  1047. memset(ret, 0, sizeof(*ret));
  1048. cpus_and(*cpumask, *cpumask, cpu_online_map);
  1049. cpu = first_cpu(*cpumask);
  1050. while (cpu < NR_CPUS) {
  1051. unsigned long *in, *out, off;
  1052. in = (unsigned long *)&per_cpu(page_states, cpu);
  1053. cpu = next_cpu(cpu, *cpumask);
  1054. if (cpu < NR_CPUS)
  1055. prefetch(&per_cpu(page_states, cpu));
  1056. out = (unsigned long *)ret;
  1057. for (off = 0; off < nr; off++)
  1058. *out++ += *in++;
  1059. }
  1060. }
  1061. void get_page_state_node(struct page_state *ret, int node)
  1062. {
  1063. int nr;
  1064. cpumask_t mask = node_to_cpumask(node);
  1065. nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
  1066. nr /= sizeof(unsigned long);
  1067. __get_page_state(ret, nr+1, &mask);
  1068. }
  1069. void get_page_state(struct page_state *ret)
  1070. {
  1071. int nr;
  1072. cpumask_t mask = CPU_MASK_ALL;
  1073. nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
  1074. nr /= sizeof(unsigned long);
  1075. __get_page_state(ret, nr + 1, &mask);
  1076. }
  1077. void get_full_page_state(struct page_state *ret)
  1078. {
  1079. cpumask_t mask = CPU_MASK_ALL;
  1080. __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
  1081. }
  1082. unsigned long read_page_state_offset(unsigned long offset)
  1083. {
  1084. unsigned long ret = 0;
  1085. int cpu;
  1086. for_each_online_cpu(cpu) {
  1087. unsigned long in;
  1088. in = (unsigned long)&per_cpu(page_states, cpu) + offset;
  1089. ret += *((unsigned long *)in);
  1090. }
  1091. return ret;
  1092. }
  1093. void __mod_page_state_offset(unsigned long offset, unsigned long delta)
  1094. {
  1095. void *ptr;
  1096. ptr = &__get_cpu_var(page_states);
  1097. *(unsigned long *)(ptr + offset) += delta;
  1098. }
  1099. EXPORT_SYMBOL(__mod_page_state_offset);
  1100. void mod_page_state_offset(unsigned long offset, unsigned long delta)
  1101. {
  1102. unsigned long flags;
  1103. void *ptr;
  1104. local_irq_save(flags);
  1105. ptr = &__get_cpu_var(page_states);
  1106. *(unsigned long *)(ptr + offset) += delta;
  1107. local_irq_restore(flags);
  1108. }
  1109. EXPORT_SYMBOL(mod_page_state_offset);
  1110. void __get_zone_counts(unsigned long *active, unsigned long *inactive,
  1111. unsigned long *free, struct pglist_data *pgdat)
  1112. {
  1113. struct zone *zones = pgdat->node_zones;
  1114. int i;
  1115. *active = 0;
  1116. *inactive = 0;
  1117. *free = 0;
  1118. for (i = 0; i < MAX_NR_ZONES; i++) {
  1119. *active += zones[i].nr_active;
  1120. *inactive += zones[i].nr_inactive;
  1121. *free += zones[i].free_pages;
  1122. }
  1123. }
  1124. void get_zone_counts(unsigned long *active,
  1125. unsigned long *inactive, unsigned long *free)
  1126. {
  1127. struct pglist_data *pgdat;
  1128. *active = 0;
  1129. *inactive = 0;
  1130. *free = 0;
  1131. for_each_pgdat(pgdat) {
  1132. unsigned long l, m, n;
  1133. __get_zone_counts(&l, &m, &n, pgdat);
  1134. *active += l;
  1135. *inactive += m;
  1136. *free += n;
  1137. }
  1138. }
  1139. void si_meminfo(struct sysinfo *val)
  1140. {
  1141. val->totalram = totalram_pages;
  1142. val->sharedram = 0;
  1143. val->freeram = nr_free_pages();
  1144. val->bufferram = nr_blockdev_pages();
  1145. #ifdef CONFIG_HIGHMEM
  1146. val->totalhigh = totalhigh_pages;
  1147. val->freehigh = nr_free_highpages();
  1148. #else
  1149. val->totalhigh = 0;
  1150. val->freehigh = 0;
  1151. #endif
  1152. val->mem_unit = PAGE_SIZE;
  1153. }
  1154. EXPORT_SYMBOL(si_meminfo);
  1155. #ifdef CONFIG_NUMA
  1156. void si_meminfo_node(struct sysinfo *val, int nid)
  1157. {
  1158. pg_data_t *pgdat = NODE_DATA(nid);
  1159. val->totalram = pgdat->node_present_pages;
  1160. val->freeram = nr_free_pages_pgdat(pgdat);
  1161. val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
  1162. val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
  1163. val->mem_unit = PAGE_SIZE;
  1164. }
  1165. #endif
  1166. #define K(x) ((x) << (PAGE_SHIFT-10))
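As a quick worked example of the K() macro (assuming the common 4K page size, PAGE_SHIFT = 12): K(x) is then x << 2, i.e. it converts a page count into kilobytes, so K(256) = 1024kB.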
  1167. /*
  1168. * Show free area list (used inside shift_scroll-lock stuff)
  1169. * We also calculate the percentage fragmentation. We do this by counting the
  1170. * memory on each free list with the exception of the first item on the list.
  1171. */
  1172. void show_free_areas(void)
  1173. {
  1174. struct page_state ps;
  1175. int cpu, temperature;
  1176. unsigned long active;
  1177. unsigned long inactive;
  1178. unsigned long free;
  1179. struct zone *zone;
  1180. for_each_zone(zone) {
  1181. show_node(zone);
  1182. printk("%s per-cpu:", zone->name);
  1183. if (!populated_zone(zone)) {
  1184. printk(" empty\n");
  1185. continue;
  1186. } else
  1187. printk("\n");
  1188. for_each_online_cpu(cpu) {
  1189. struct per_cpu_pageset *pageset;
  1190. pageset = zone_pcp(zone, cpu);
  1191. for (temperature = 0; temperature < 2; temperature++)
  1192. printk("cpu %d %s: high %d, batch %d used:%d\n",
  1193. cpu,
  1194. temperature ? "cold" : "hot",
  1195. pageset->pcp[temperature].high,
  1196. pageset->pcp[temperature].batch,
  1197. pageset->pcp[temperature].count);
  1198. }
  1199. }
  1200. get_page_state(&ps);
  1201. get_zone_counts(&active, &inactive, &free);
  1202. printk("Free pages: %11ukB (%ukB HighMem)\n",
  1203. K(nr_free_pages()),
  1204. K(nr_free_highpages()));
  1205. printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
  1206. "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
  1207. active,
  1208. inactive,
  1209. ps.nr_dirty,
  1210. ps.nr_writeback,
  1211. ps.nr_unstable,
  1212. nr_free_pages(),
  1213. ps.nr_slab,
  1214. ps.nr_mapped,
  1215. ps.nr_page_table_pages);
  1216. for_each_zone(zone) {
  1217. int i;
  1218. show_node(zone);
  1219. printk("%s"
  1220. " free:%lukB"
  1221. " min:%lukB"
  1222. " low:%lukB"
  1223. " high:%lukB"
  1224. " active:%lukB"
  1225. " inactive:%lukB"
  1226. " present:%lukB"
  1227. " pages_scanned:%lu"
  1228. " all_unreclaimable? %s"
  1229. "\n",
  1230. zone->name,
  1231. K(zone->free_pages),
  1232. K(zone->pages_min),
  1233. K(zone->pages_low),
  1234. K(zone->pages_high),
  1235. K(zone->nr_active),
  1236. K(zone->nr_inactive),
  1237. K(zone->present_pages),
  1238. zone->pages_scanned,
  1239. (zone->all_unreclaimable ? "yes" : "no")
  1240. );
  1241. printk("lowmem_reserve[]:");
  1242. for (i = 0; i < MAX_NR_ZONES; i++)
  1243. printk(" %lu", zone->lowmem_reserve[i]);
  1244. printk("\n");
  1245. }
  1246. for_each_zone(zone) {
  1247. unsigned long nr, flags, order, total = 0;
  1248. show_node(zone);
  1249. printk("%s: ", zone->name);
  1250. if (!populated_zone(zone)) {
  1251. printk("empty\n");
  1252. continue;
  1253. }
  1254. spin_lock_irqsave(&zone->lock, flags);
  1255. for (order = 0; order < MAX_ORDER; order++) {
  1256. nr = zone->free_area[order].nr_free;
  1257. total += nr << order;
  1258. printk("%lu*%lukB ", nr, K(1UL) << order);
  1259. }
  1260. spin_unlock_irqrestore(&zone->lock, flags);
  1261. printk("= %lukB\n", K(total));
  1262. }
  1263. show_swap_cache_info();
  1264. }
  1265. /*
  1266. * Builds allocation fallback zone lists.
  1267. *
  1268. * Add all populated zones of a node to the zonelist.
  1269. */
  1270. static int __init build_zonelists_node(pg_data_t *pgdat,
  1271. struct zonelist *zonelist, int nr_zones, int zone_type)
  1272. {
  1273. struct zone *zone;
  1274. BUG_ON(zone_type > ZONE_HIGHMEM);
  1275. do {
  1276. zone = pgdat->node_zones + zone_type;
  1277. if (populated_zone(zone)) {
  1278. #ifndef CONFIG_HIGHMEM
  1279. BUG_ON(zone_type > ZONE_NORMAL);
  1280. #endif
  1281. zonelist->zones[nr_zones++] = zone;
  1282. check_highest_zone(zone_type);
  1283. }
  1284. zone_type--;
  1285. } while (zone_type >= 0);
  1286. return nr_zones;
  1287. }
  1288. static inline int highest_zone(int zone_bits)
  1289. {
  1290. int res = ZONE_NORMAL;
  1291. if (zone_bits & (__force int)__GFP_HIGHMEM)
  1292. res = ZONE_HIGHMEM;
  1293. if (zone_bits & (__force int)__GFP_DMA32)
  1294. res = ZONE_DMA32;
  1295. if (zone_bits & (__force int)__GFP_DMA)
  1296. res = ZONE_DMA;
  1297. return res;
  1298. }
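For example (reading the checks above top to bottom): each test simply overwrites res, so later assignments win and a value carrying several zone bits resolves to the lowest zone; a mask containing both __GFP_HIGHMEM and __GFP_DMA yields ZONE_DMA, while a mask with no zone bits falls through to ZONE_NORMAL.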
  1299. #ifdef CONFIG_NUMA
  1300. #define MAX_NODE_LOAD (num_online_nodes())
  1301. static int __initdata node_load[MAX_NUMNODES];
  1302. /**
  1303. * find_next_best_node - find the next node that should appear in a given node's fallback list
  1304. * @node: node whose fallback list we're appending
  1305. * @used_node_mask: nodemask_t of already used nodes
  1306. *
  1307. * We use a number of factors to determine which is the next node that should
  1308. * appear on a given node's fallback list. The node should not have appeared
  1309. * already in @node's fallback list, and it should be the next closest node
  1310. * according to the distance array (which contains arbitrary distance values
  1311. * from each node to each node in the system), and should also prefer nodes
  1312. * with no CPUs, since presumably they'll have very little allocation pressure
  1313. * on them otherwise.
  1314. * It returns -1 if no node is found.
  1315. */
  1316. static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
  1317. {
  1318. int i, n, val;
  1319. int min_val = INT_MAX;
  1320. int best_node = -1;
  1321. for_each_online_node(i) {
  1322. cpumask_t tmp;
  1323. /* Start from local node */
  1324. n = (node+i) % num_online_nodes();
  1325. /* Don't want a node to appear more than once */
  1326. if (node_isset(n, *used_node_mask))
  1327. continue;
  1328. /* Use the local node if we haven't already */
  1329. if (!node_isset(node, *used_node_mask)) {
  1330. best_node = node;
  1331. break;
  1332. }
  1333. /* Use the distance array to find the distance */
  1334. val = node_distance(node, n);
  1335. /* Give preference to headless and unused nodes */
  1336. tmp = node_to_cpumask(n);
  1337. if (!cpus_empty(tmp))
  1338. val += PENALTY_FOR_NODE_WITH_CPUS;
  1339. /* Slight preference for less loaded node */
  1340. val *= (MAX_NODE_LOAD*MAX_NUMNODES);
  1341. val += node_load[n];
  1342. if (val < min_val) {
  1343. min_val = val;
  1344. best_node = n;
  1345. }
  1346. }
  1347. if (best_node >= 0)
  1348. node_set(best_node, *used_node_mask);
  1349. return best_node;
  1350. }
  1351. static void __init build_zonelists(pg_data_t *pgdat)
  1352. {
  1353. int i, j, k, node, local_node;
  1354. int prev_node, load;
  1355. struct zonelist *zonelist;
  1356. nodemask_t used_mask;
  1357. /* initialize zonelists */
  1358. for (i = 0; i < GFP_ZONETYPES; i++) {
  1359. zonelist = pgdat->node_zonelists + i;
  1360. zonelist->zones[0] = NULL;
  1361. }
  1362. /* NUMA-aware ordering of nodes */
  1363. local_node = pgdat->node_id;
  1364. load = num_online_nodes();
  1365. prev_node = local_node;
  1366. nodes_clear(used_mask);
  1367. while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
  1368. int distance = node_distance(local_node, node);
  1369. /*
  1370. * If another node is sufficiently far away then it is better
  1371. * to reclaim pages in a zone before going off node.
  1372. */
  1373. if (distance > RECLAIM_DISTANCE)
  1374. zone_reclaim_mode = 1;
  1375. /*
1376. * We don't want to put too much pressure on any one node,
1377. * so we add a penalty to the first node in each distance
1378. * group, making the selection round-robin within that group.
  1379. */
  1380. if (distance != node_distance(local_node, prev_node))
  1381. node_load[node] += load;
  1382. prev_node = node;
  1383. load--;
  1384. for (i = 0; i < GFP_ZONETYPES; i++) {
  1385. zonelist = pgdat->node_zonelists + i;
  1386. for (j = 0; zonelist->zones[j] != NULL; j++);
  1387. k = highest_zone(i);
  1388. j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
  1389. zonelist->zones[j] = NULL;
  1390. }
  1391. }
  1392. }
  1393. #else /* CONFIG_NUMA */
  1394. static void __init build_zonelists(pg_data_t *pgdat)
  1395. {
  1396. int i, j, k, node, local_node;
  1397. local_node = pgdat->node_id;
  1398. for (i = 0; i < GFP_ZONETYPES; i++) {
  1399. struct zonelist *zonelist;
  1400. zonelist = pgdat->node_zonelists + i;
  1401. j = 0;
  1402. k = highest_zone(i);
  1403. j = build_zonelists_node(pgdat, zonelist, j, k);
  1404. /*
  1405. * Now we build the zonelist so that it contains the zones
  1406. * of all the other nodes.
  1407. * We don't want to pressure a particular node, so when
  1408. * building the zones for node N, we make sure that the
  1409. * zones coming right after the local ones are those from
1410. * node N+1 (modulo the number of nodes)
  1411. */
  1412. for (node = local_node + 1; node < MAX_NUMNODES; node++) {
  1413. if (!node_online(node))
  1414. continue;
  1415. j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
  1416. }
  1417. for (node = 0; node < local_node; node++) {
  1418. if (!node_online(node))
  1419. continue;
  1420. j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
  1421. }
  1422. zonelist->zones[j] = NULL;
  1423. }
  1424. }
  1425. #endif /* CONFIG_NUMA */
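/*
 * The net effect in either configuration: a zonelist for GFP_KERNEL
 * style allocations tries the local node's NORMAL, DMA32 and DMA zones
 * first and only then the corresponding zones of the remaining nodes,
 * so allocations spill to another node once local memory is exhausted
 * or fails the watermark checks.
 */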
  1426. void __init build_all_zonelists(void)
  1427. {
  1428. int i;
  1429. for_each_online_node(i)
  1430. build_zonelists(NODE_DATA(i));
  1431. printk("Built %i zonelists\n", num_online_nodes());
  1432. cpuset_init_current_mems_allowed();
  1433. }
  1434. /*
  1435. * Helper functions to size the waitqueue hash table.
  1436. * Essentially these want to choose hash table sizes sufficiently
  1437. * large so that collisions trying to wait on pages are rare.
  1438. * But in fact, the number of active page waitqueues on typical
1439. * systems is ridiculously low, less than 200. So even this
1440. * sizing is conservative, although it may look large.
  1441. *
  1442. * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
  1443. * waitqueues, i.e. the size of the waitq table given the number of pages.
  1444. */
  1445. #define PAGES_PER_WAITQUEUE 256
  1446. static inline unsigned long wait_table_size(unsigned long pages)
  1447. {
  1448. unsigned long size = 1;
  1449. pages /= PAGES_PER_WAITQUEUE;
  1450. while (size < pages)
  1451. size <<= 1;
  1452. /*
  1453. * Once we have dozens or even hundreds of threads sleeping
  1454. * on IO we've got bigger problems than wait queue collision.
  1455. * Limit the size of the wait table to a reasonable size.
  1456. */
  1457. size = min(size, 4096UL);
  1458. return max(size, 4UL);
  1459. }
  1460. /*
  1461. * This is an integer logarithm so that shifts can be used later
  1462. * to extract the more random high bits from the multiplicative
  1463. * hash function before the remainder is taken.
  1464. */
  1465. static inline unsigned long wait_table_bits(unsigned long size)
  1466. {
  1467. return ffz(~size);
  1468. }
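/*
 * Example, assuming 4 KiB pages: a 1 GiB zone spans 262144 pages, so
 * wait_table_size() returns 262144 / 256 = 1024 hashed waitqueues and
 * wait_table_bits(1024) is 10.
 */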
  1469. #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
  1470. static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
  1471. unsigned long *zones_size, unsigned long *zholes_size)
  1472. {
  1473. unsigned long realtotalpages, totalpages = 0;
  1474. int i;
  1475. for (i = 0; i < MAX_NR_ZONES; i++)
  1476. totalpages += zones_size[i];
  1477. pgdat->node_spanned_pages = totalpages;
  1478. realtotalpages = totalpages;
  1479. if (zholes_size)
  1480. for (i = 0; i < MAX_NR_ZONES; i++)
  1481. realtotalpages -= zholes_size[i];
  1482. pgdat->node_present_pages = realtotalpages;
  1483. printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
  1484. }
  1485. /*
  1486. * Initially all pages are reserved - free ones are freed
  1487. * up by free_all_bootmem() once the early boot process is
  1488. * done. Non-atomic initialization, single-pass.
  1489. */
  1490. void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
  1491. unsigned long start_pfn)
  1492. {
  1493. struct page *page;
  1494. unsigned long end_pfn = start_pfn + size;
  1495. unsigned long pfn;
  1496. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  1497. if (!early_pfn_valid(pfn))
  1498. continue;
  1499. page = pfn_to_page(pfn);
  1500. set_page_links(page, zone, nid, pfn);
  1501. set_page_count(page, 1);
  1502. reset_page_mapcount(page);
  1503. SetPageReserved(page);
  1504. INIT_LIST_HEAD(&page->lru);
  1505. #ifdef WANT_PAGE_VIRTUAL
  1506. /* The shift won't overflow because ZONE_NORMAL is below 4G. */
  1507. if (!is_highmem_idx(zone))
  1508. set_page_address(page, __va(pfn << PAGE_SHIFT));
  1509. #endif
  1510. }
  1511. }
  1512. void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
  1513. unsigned long size)
  1514. {
  1515. int order;
  1516. for (order = 0; order < MAX_ORDER ; order++) {
  1517. INIT_LIST_HEAD(&zone->free_area[order].free_list);
  1518. zone->free_area[order].nr_free = 0;
  1519. }
  1520. }
  1521. #define ZONETABLE_INDEX(x, zone_nr) ((x << ZONES_SHIFT) | zone_nr)
  1522. void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
  1523. unsigned long size)
  1524. {
  1525. unsigned long snum = pfn_to_section_nr(pfn);
  1526. unsigned long end = pfn_to_section_nr(pfn + size);
  1527. if (FLAGS_HAS_NODE)
  1528. zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
  1529. else
  1530. for (; snum <= end; snum++)
  1531. zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
  1532. }
  1533. #ifndef __HAVE_ARCH_MEMMAP_INIT
  1534. #define memmap_init(size, nid, zone, start_pfn) \
  1535. memmap_init_zone((size), (nid), (zone), (start_pfn))
  1536. #endif
  1537. static int __meminit zone_batchsize(struct zone *zone)
  1538. {
  1539. int batch;
  1540. /*
1541. * The per-cpu-pages pools are set to around 1/1000th of the
1542. * size of the zone, but to no more than 512 KiB.
1543. *
1544. * OK, so we don't know how big the CPU cache is. So guess.
  1545. */
  1546. batch = zone->present_pages / 1024;
  1547. if (batch * PAGE_SIZE > 512 * 1024)
  1548. batch = (512 * 1024) / PAGE_SIZE;
  1549. batch /= 4; /* We effectively *= 4 below */
  1550. if (batch < 1)
  1551. batch = 1;
  1552. /*
  1553. * Clamp the batch to a 2^n - 1 value. Having a power
  1554. * of 2 value was found to be more likely to have
  1555. * suboptimal cache aliasing properties in some cases.
  1556. *
  1557. * For example if 2 tasks are alternately allocating
  1558. * batches of pages, one task can end up with a lot
  1559. * of pages of one half of the possible page colors
  1560. * and the other with pages of the other colors.
  1561. */
  1562. batch = (1 << (fls(batch + batch/2)-1)) - 1;
  1563. return batch;
  1564. }
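/*
 * Worked example, assuming 4 KiB pages: for a 1 GiB zone (262144 pages)
 * batch starts at 256, is clamped to 128 by the 512 KiB cap, becomes 32
 * after the divide by 4, and the final 2^n - 1 rounding
 * (fls(32 + 16) == 6) yields a batch of 31.
 */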
  1565. inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
  1566. {
  1567. struct per_cpu_pages *pcp;
  1568. memset(p, 0, sizeof(*p));
  1569. pcp = &p->pcp[0]; /* hot */
  1570. pcp->count = 0;
  1571. pcp->high = 6 * batch;
  1572. pcp->batch = max(1UL, 1 * batch);
  1573. INIT_LIST_HEAD(&pcp->list);
  1574. pcp = &p->pcp[1]; /* cold*/
  1575. pcp->count = 0;
  1576. pcp->high = 2 * batch;
  1577. pcp->batch = max(1UL, batch/2);
  1578. INIT_LIST_HEAD(&pcp->list);
  1579. }
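/*
 * With the batch of 31 from the example above, the hot list gets
 * high = 186 and batch = 31, and the cold list high = 62 and batch = 15.
 */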
  1580. /*
1581. * setup_pagelist_highmark() sets the high water mark of the hot
1582. * per-cpu pagelist of pageset p to the value high.
  1583. */
  1584. static void setup_pagelist_highmark(struct per_cpu_pageset *p,
  1585. unsigned long high)
  1586. {
  1587. struct per_cpu_pages *pcp;
  1588. pcp = &p->pcp[0]; /* hot list */
  1589. pcp->high = high;
  1590. pcp->batch = max(1UL, high/4);
  1591. if ((high/4) > (PAGE_SHIFT * 8))
  1592. pcp->batch = PAGE_SHIFT * 8;
  1593. }
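/*
 * Example, assuming PAGE_SHIFT == 12: a request of high = 1000 gives
 * batch = 250, which the check above caps at PAGE_SHIFT * 8 = 96.
 */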
  1594. #ifdef CONFIG_NUMA
  1595. /*
  1596. * Boot pageset table. One per cpu which is going to be used for all
  1597. * zones and all nodes. The parameters will be set in such a way
  1598. * that an item put on a list will immediately be handed over to
  1599. * the buddy list. This is safe since pageset manipulation is done
  1600. * with interrupts disabled.
  1601. *
  1602. * Some NUMA counter updates may also be caught by the boot pagesets.
  1603. *
  1604. * The boot_pagesets must be kept even after bootup is complete for
  1605. * unused processors and/or zones. They do play a role for bootstrapping
  1606. * hotplugged processors.
  1607. *
  1608. * zoneinfo_show() and maybe other functions do
  1609. * not check if the processor is online before following the pageset pointer.
  1610. * Other parts of the kernel may not check if the zone is available.
  1611. */
  1612. static struct per_cpu_pageset
  1613. boot_pageset[NR_CPUS];
  1614. /*
  1615. * Dynamically allocate memory for the
  1616. * per cpu pageset array in struct zone.
  1617. */
  1618. static int __meminit process_zones(int cpu)
  1619. {
  1620. struct zone *zone, *dzone;
  1621. for_each_zone(zone) {
  1622. zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
  1623. GFP_KERNEL, cpu_to_node(cpu));
  1624. if (!zone_pcp(zone, cpu))
  1625. goto bad;
  1626. setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
  1627. if (percpu_pagelist_fraction)
  1628. setup_pagelist_highmark(zone_pcp(zone, cpu),
  1629. (zone->present_pages / percpu_pagelist_fraction));
  1630. }
  1631. return 0;
  1632. bad:
  1633. for_each_zone(dzone) {
  1634. if (dzone == zone)
  1635. break;
  1636. kfree(zone_pcp(dzone, cpu));
  1637. zone_pcp(dzone, cpu) = NULL;
  1638. }
  1639. return -ENOMEM;
  1640. }
  1641. static inline void free_zone_pagesets(int cpu)
  1642. {
  1643. struct zone *zone;
  1644. for_each_zone(zone) {
  1645. struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
  1646. zone_pcp(zone, cpu) = NULL;
  1647. kfree(pset);
  1648. }
  1649. }
  1650. static int __meminit pageset_cpuup_callback(struct notifier_block *nfb,
  1651. unsigned long action,
  1652. void *hcpu)
  1653. {
  1654. int cpu = (long)hcpu;
  1655. int ret = NOTIFY_OK;
  1656. switch (action) {
  1657. case CPU_UP_PREPARE:
  1658. if (process_zones(cpu))
  1659. ret = NOTIFY_BAD;
  1660. break;
  1661. case CPU_UP_CANCELED:
  1662. case CPU_DEAD:
  1663. free_zone_pagesets(cpu);
  1664. break;
  1665. default:
  1666. break;
  1667. }
  1668. return ret;
  1669. }
  1670. static struct notifier_block pageset_notifier =
  1671. { &pageset_cpuup_callback, NULL, 0 };
  1672. void __init setup_per_cpu_pageset(void)
  1673. {
  1674. int err;
  1675. /* Initialize per_cpu_pageset for cpu 0.
  1676. * A cpuup callback will do this for every cpu
  1677. * as it comes online
  1678. */
  1679. err = process_zones(smp_processor_id());
  1680. BUG_ON(err);
  1681. register_cpu_notifier(&pageset_notifier);
  1682. }
  1683. #endif
  1684. static __meminit
  1685. void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
  1686. {
  1687. int i;
  1688. struct pglist_data *pgdat = zone->zone_pgdat;
  1689. /*
  1690. * The per-page waitqueue mechanism uses hashed waitqueues
  1691. * per zone.
  1692. */
  1693. zone->wait_table_size = wait_table_size(zone_size_pages);
  1694. zone->wait_table_bits = wait_table_bits(zone->wait_table_size);
  1695. zone->wait_table = (wait_queue_head_t *)
  1696. alloc_bootmem_node(pgdat, zone->wait_table_size
  1697. * sizeof(wait_queue_head_t));
  1698. for(i = 0; i < zone->wait_table_size; ++i)
  1699. init_waitqueue_head(zone->wait_table + i);
  1700. }
  1701. static __meminit void zone_pcp_init(struct zone *zone)
  1702. {
  1703. int cpu;
  1704. unsigned long batch = zone_batchsize(zone);
  1705. for (cpu = 0; cpu < NR_CPUS; cpu++) {
  1706. #ifdef CONFIG_NUMA
  1707. /* Early boot. Slab allocator not functional yet */
  1708. zone_pcp(zone, cpu) = &boot_pageset[cpu];
  1709. setup_pageset(&boot_pageset[cpu],0);
  1710. #else
  1711. setup_pageset(zone_pcp(zone,cpu), batch);
  1712. #endif
  1713. }
  1714. printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
  1715. zone->name, zone->present_pages, batch);
  1716. }
  1717. static __meminit void init_currently_empty_zone(struct zone *zone,
  1718. unsigned long zone_start_pfn, unsigned long size)
  1719. {
  1720. struct pglist_data *pgdat = zone->zone_pgdat;
  1721. zone_wait_table_init(zone, size);
  1722. pgdat->nr_zones = zone_idx(zone) + 1;
  1723. zone->zone_mem_map = pfn_to_page(zone_start_pfn);
  1724. zone->zone_start_pfn = zone_start_pfn;
  1725. memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
  1726. zone_init_free_lists(pgdat, zone, zone->spanned_pages);
  1727. }
  1728. /*
  1729. * Set up the zone data structures:
  1730. * - mark all pages reserved
  1731. * - mark all memory queues empty
  1732. * - clear the memory bitmaps
  1733. */
  1734. static void __init free_area_init_core(struct pglist_data *pgdat,
  1735. unsigned long *zones_size, unsigned long *zholes_size)
  1736. {
  1737. unsigned long j;
  1738. int nid = pgdat->node_id;
  1739. unsigned long zone_start_pfn = pgdat->node_start_pfn;
  1740. pgdat_resize_init(pgdat);
  1741. pgdat->nr_zones = 0;
  1742. init_waitqueue_head(&pgdat->kswapd_wait);
  1743. pgdat->kswapd_max_order = 0;
  1744. for (j = 0; j < MAX_NR_ZONES; j++) {
  1745. struct zone *zone = pgdat->node_zones + j;
  1746. unsigned long size, realsize;
  1747. realsize = size = zones_size[j];
  1748. if (zholes_size)
  1749. realsize -= zholes_size[j];
  1750. if (j < ZONE_HIGHMEM)
  1751. nr_kernel_pages += realsize;
  1752. nr_all_pages += realsize;
  1753. zone->spanned_pages = size;
  1754. zone->present_pages = realsize;
  1755. zone->name = zone_names[j];
  1756. spin_lock_init(&zone->lock);
  1757. spin_lock_init(&zone->lru_lock);
  1758. zone_seqlock_init(zone);
  1759. zone->zone_pgdat = pgdat;
  1760. zone->free_pages = 0;
  1761. zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
  1762. zone_pcp_init(zone);
  1763. INIT_LIST_HEAD(&zone->active_list);
  1764. INIT_LIST_HEAD(&zone->inactive_list);
  1765. zone->nr_scan_active = 0;
  1766. zone->nr_scan_inactive = 0;
  1767. zone->nr_active = 0;
  1768. zone->nr_inactive = 0;
  1769. atomic_set(&zone->reclaim_in_progress, 0);
  1770. if (!size)
  1771. continue;
  1772. zonetable_add(zone, nid, j, zone_start_pfn, size);
  1773. init_currently_empty_zone(zone, zone_start_pfn, size);
  1774. zone_start_pfn += size;
  1775. }
  1776. }
  1777. static void __init alloc_node_mem_map(struct pglist_data *pgdat)
  1778. {
  1779. /* Skip empty nodes */
  1780. if (!pgdat->node_spanned_pages)
  1781. return;
  1782. #ifdef CONFIG_FLAT_NODE_MEM_MAP
  1783. /* ia64 gets its own node_mem_map, before this, without bootmem */
  1784. if (!pgdat->node_mem_map) {
  1785. unsigned long size;
  1786. struct page *map;
  1787. size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
  1788. map = alloc_remap(pgdat->node_id, size);
  1789. if (!map)
  1790. map = alloc_bootmem_node(pgdat, size);
  1791. pgdat->node_mem_map = map;
  1792. }
  1793. #ifdef CONFIG_FLATMEM
  1794. /*
  1795. * With no DISCONTIG, the global mem_map is just set as node 0's
  1796. */
  1797. if (pgdat == NODE_DATA(0))
  1798. mem_map = NODE_DATA(0)->node_mem_map;
  1799. #endif
  1800. #endif /* CONFIG_FLAT_NODE_MEM_MAP */
  1801. }
  1802. void __init free_area_init_node(int nid, struct pglist_data *pgdat,
  1803. unsigned long *zones_size, unsigned long node_start_pfn,
  1804. unsigned long *zholes_size)
  1805. {
  1806. pgdat->node_id = nid;
  1807. pgdat->node_start_pfn = node_start_pfn;
  1808. calculate_zone_totalpages(pgdat, zones_size, zholes_size);
  1809. alloc_node_mem_map(pgdat);
  1810. free_area_init_core(pgdat, zones_size, zholes_size);
  1811. }
  1812. #ifndef CONFIG_NEED_MULTIPLE_NODES
  1813. static bootmem_data_t contig_bootmem_data;
  1814. struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
  1815. EXPORT_SYMBOL(contig_page_data);
  1816. #endif
  1817. void __init free_area_init(unsigned long *zones_size)
  1818. {
  1819. free_area_init_node(0, NODE_DATA(0), zones_size,
  1820. __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
  1821. }
  1822. #ifdef CONFIG_PROC_FS
  1823. #include <linux/seq_file.h>
  1824. static void *frag_start(struct seq_file *m, loff_t *pos)
  1825. {
  1826. pg_data_t *pgdat;
  1827. loff_t node = *pos;
  1828. for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
  1829. --node;
  1830. return pgdat;
  1831. }
  1832. static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
  1833. {
  1834. pg_data_t *pgdat = (pg_data_t *)arg;
  1835. (*pos)++;
  1836. return pgdat->pgdat_next;
  1837. }
  1838. static void frag_stop(struct seq_file *m, void *arg)
  1839. {
  1840. }
  1841. /*
  1842. * This walks the free areas for each zone.
  1843. */
  1844. static int frag_show(struct seq_file *m, void *arg)
  1845. {
  1846. pg_data_t *pgdat = (pg_data_t *)arg;
  1847. struct zone *zone;
  1848. struct zone *node_zones = pgdat->node_zones;
  1849. unsigned long flags;
  1850. int order;
  1851. for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
  1852. if (!populated_zone(zone))
  1853. continue;
  1854. spin_lock_irqsave(&zone->lock, flags);
  1855. seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
  1856. for (order = 0; order < MAX_ORDER; ++order)
  1857. seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
  1858. spin_unlock_irqrestore(&zone->lock, flags);
  1859. seq_putc(m, '\n');
  1860. }
  1861. return 0;
  1862. }
  1863. struct seq_operations fragmentation_op = {
  1864. .start = frag_start,
  1865. .next = frag_next,
  1866. .stop = frag_stop,
  1867. .show = frag_show,
  1868. };
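/*
 * These seq_operations back /proc/buddyinfo: one line per populated
 * zone, listing the number of free blocks at each order, e.g. (sample
 * formatting only):
 *
 *	Node 0, zone   Normal    145     52     23      7      3 ...
 */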
  1869. /*
  1870. * Output information about zones in @pgdat.
  1871. */
  1872. static int zoneinfo_show(struct seq_file *m, void *arg)
  1873. {
  1874. pg_data_t *pgdat = arg;
  1875. struct zone *zone;
  1876. struct zone *node_zones = pgdat->node_zones;
  1877. unsigned long flags;
  1878. for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
  1879. int i;
  1880. if (!populated_zone(zone))
  1881. continue;
  1882. spin_lock_irqsave(&zone->lock, flags);
  1883. seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
  1884. seq_printf(m,
  1885. "\n pages free %lu"
  1886. "\n min %lu"
  1887. "\n low %lu"
  1888. "\n high %lu"
  1889. "\n active %lu"
  1890. "\n inactive %lu"
  1891. "\n scanned %lu (a: %lu i: %lu)"
  1892. "\n spanned %lu"
  1893. "\n present %lu",
  1894. zone->free_pages,
  1895. zone->pages_min,
  1896. zone->pages_low,
  1897. zone->pages_high,
  1898. zone->nr_active,
  1899. zone->nr_inactive,
  1900. zone->pages_scanned,
  1901. zone->nr_scan_active, zone->nr_scan_inactive,
  1902. zone->spanned_pages,
  1903. zone->present_pages);
  1904. seq_printf(m,
  1905. "\n protection: (%lu",
  1906. zone->lowmem_reserve[0]);
  1907. for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
  1908. seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
  1909. seq_printf(m,
  1910. ")"
  1911. "\n pagesets");
  1912. for_each_online_cpu(i) {
  1913. struct per_cpu_pageset *pageset;
  1914. int j;
  1915. pageset = zone_pcp(zone, i);
  1916. for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
  1917. if (pageset->pcp[j].count)
  1918. break;
  1919. }
  1920. if (j == ARRAY_SIZE(pageset->pcp))
  1921. continue;
  1922. for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
  1923. seq_printf(m,
  1924. "\n cpu: %i pcp: %i"
  1925. "\n count: %i"
  1926. "\n high: %i"
  1927. "\n batch: %i",
  1928. i, j,
  1929. pageset->pcp[j].count,
  1930. pageset->pcp[j].high,
  1931. pageset->pcp[j].batch);
  1932. }
  1933. #ifdef CONFIG_NUMA
  1934. seq_printf(m,
  1935. "\n numa_hit: %lu"
  1936. "\n numa_miss: %lu"
  1937. "\n numa_foreign: %lu"
  1938. "\n interleave_hit: %lu"
  1939. "\n local_node: %lu"
  1940. "\n other_node: %lu",
  1941. pageset->numa_hit,
  1942. pageset->numa_miss,
  1943. pageset->numa_foreign,
  1944. pageset->interleave_hit,
  1945. pageset->local_node,
  1946. pageset->other_node);
  1947. #endif
  1948. }
  1949. seq_printf(m,
  1950. "\n all_unreclaimable: %u"
  1951. "\n prev_priority: %i"
  1952. "\n temp_priority: %i"
  1953. "\n start_pfn: %lu",
  1954. zone->all_unreclaimable,
  1955. zone->prev_priority,
  1956. zone->temp_priority,
  1957. zone->zone_start_pfn);
  1958. spin_unlock_irqrestore(&zone->lock, flags);
  1959. seq_putc(m, '\n');
  1960. }
  1961. return 0;
  1962. }
  1963. struct seq_operations zoneinfo_op = {
  1964. .start = frag_start, /* iterate over all zones. The same as in
  1965. * fragmentation. */
  1966. .next = frag_next,
  1967. .stop = frag_stop,
  1968. .show = zoneinfo_show,
  1969. };
  1970. static char *vmstat_text[] = {
  1971. "nr_dirty",
  1972. "nr_writeback",
  1973. "nr_unstable",
  1974. "nr_page_table_pages",
  1975. "nr_mapped",
  1976. "nr_slab",
  1977. "pgpgin",
  1978. "pgpgout",
  1979. "pswpin",
  1980. "pswpout",
  1981. "pgalloc_high",
  1982. "pgalloc_normal",
  1983. "pgalloc_dma32",
  1984. "pgalloc_dma",
  1985. "pgfree",
  1986. "pgactivate",
  1987. "pgdeactivate",
  1988. "pgfault",
  1989. "pgmajfault",
  1990. "pgrefill_high",
  1991. "pgrefill_normal",
  1992. "pgrefill_dma32",
  1993. "pgrefill_dma",
  1994. "pgsteal_high",
  1995. "pgsteal_normal",
  1996. "pgsteal_dma32",
  1997. "pgsteal_dma",
  1998. "pgscan_kswapd_high",
  1999. "pgscan_kswapd_normal",
  2000. "pgscan_kswapd_dma32",
  2001. "pgscan_kswapd_dma",
  2002. "pgscan_direct_high",
  2003. "pgscan_direct_normal",
  2004. "pgscan_direct_dma32",
  2005. "pgscan_direct_dma",
  2006. "pginodesteal",
  2007. "slabs_scanned",
  2008. "kswapd_steal",
  2009. "kswapd_inodesteal",
  2010. "pageoutrun",
  2011. "allocstall",
  2012. "pgrotated",
  2013. "nr_bounce",
  2014. };
  2015. static void *vmstat_start(struct seq_file *m, loff_t *pos)
  2016. {
  2017. struct page_state *ps;
  2018. if (*pos >= ARRAY_SIZE(vmstat_text))
  2019. return NULL;
  2020. ps = kmalloc(sizeof(*ps), GFP_KERNEL);
  2021. m->private = ps;
  2022. if (!ps)
  2023. return ERR_PTR(-ENOMEM);
  2024. get_full_page_state(ps);
  2025. ps->pgpgin /= 2; /* sectors -> kbytes */
  2026. ps->pgpgout /= 2;
  2027. return (unsigned long *)ps + *pos;
  2028. }
  2029. static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
  2030. {
  2031. (*pos)++;
  2032. if (*pos >= ARRAY_SIZE(vmstat_text))
  2033. return NULL;
  2034. return (unsigned long *)m->private + *pos;
  2035. }
  2036. static int vmstat_show(struct seq_file *m, void *arg)
  2037. {
  2038. unsigned long *l = arg;
  2039. unsigned long off = l - (unsigned long *)m->private;
  2040. seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
  2041. return 0;
  2042. }
  2043. static void vmstat_stop(struct seq_file *m, void *arg)
  2044. {
  2045. kfree(m->private);
  2046. m->private = NULL;
  2047. }
  2048. struct seq_operations vmstat_op = {
  2049. .start = vmstat_start,
  2050. .next = vmstat_next,
  2051. .stop = vmstat_stop,
  2052. .show = vmstat_show,
  2053. };
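/*
 * vmstat_op backs /proc/vmstat: one "name value" line per counter in
 * vmstat_text[], with pgpgin/pgpgout already converted from sectors to
 * kilobytes in vmstat_start() above.
 */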
  2054. #endif /* CONFIG_PROC_FS */
  2055. #ifdef CONFIG_HOTPLUG_CPU
  2056. static int page_alloc_cpu_notify(struct notifier_block *self,
  2057. unsigned long action, void *hcpu)
  2058. {
  2059. int cpu = (unsigned long)hcpu;
  2060. long *count;
  2061. unsigned long *src, *dest;
  2062. if (action == CPU_DEAD) {
  2063. int i;
  2064. /* Drain local pagecache count. */
  2065. count = &per_cpu(nr_pagecache_local, cpu);
  2066. atomic_add(*count, &nr_pagecache);
  2067. *count = 0;
  2068. local_irq_disable();
  2069. __drain_pages(cpu);
  2070. /* Add dead cpu's page_states to our own. */
  2071. dest = (unsigned long *)&__get_cpu_var(page_states);
  2072. src = (unsigned long *)&per_cpu(page_states, cpu);
  2073. for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
  2074. i++) {
  2075. dest[i] += src[i];
  2076. src[i] = 0;
  2077. }
  2078. local_irq_enable();
  2079. }
  2080. return NOTIFY_OK;
  2081. }
  2082. #endif /* CONFIG_HOTPLUG_CPU */
  2083. void __init page_alloc_init(void)
  2084. {
  2085. hotcpu_notifier(page_alloc_cpu_notify, 0);
  2086. }
  2087. /*
  2088. * setup_per_zone_lowmem_reserve - called whenever
2089. * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
  2090. * has a correct pages reserved value, so an adequate number of
  2091. * pages are left in the zone after a successful __alloc_pages().
  2092. */
  2093. static void setup_per_zone_lowmem_reserve(void)
  2094. {
  2095. struct pglist_data *pgdat;
  2096. int j, idx;
  2097. for_each_pgdat(pgdat) {
  2098. for (j = 0; j < MAX_NR_ZONES; j++) {
  2099. struct zone *zone = pgdat->node_zones + j;
  2100. unsigned long present_pages = zone->present_pages;
  2101. zone->lowmem_reserve[j] = 0;
  2102. for (idx = j-1; idx >= 0; idx--) {
  2103. struct zone *lower_zone;
  2104. if (sysctl_lowmem_reserve_ratio[idx] < 1)
  2105. sysctl_lowmem_reserve_ratio[idx] = 1;
  2106. lower_zone = pgdat->node_zones + idx;
  2107. lower_zone->lowmem_reserve[j] = present_pages /
  2108. sysctl_lowmem_reserve_ratio[idx];
  2109. present_pages += lower_zone->present_pages;
  2110. }
  2111. }
  2112. }
  2113. }
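/*
 * Worked example, assuming the default sysctl_lowmem_reserve_ratio of
 * { 256, 256, 32 } defined earlier in this file: for j == ZONE_HIGHMEM,
 * ZONE_NORMAL reserves highmem_pages / 32 of its own pages, ZONE_DMA32
 * reserves (highmem_pages + normal_pages) / 256, and ZONE_DMA reserves
 * (highmem_pages + normal_pages + dma32_pages) / 256.
 */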
  2114. /*
  2115. * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures
  2116. * that the pages_{min,low,high} values for each zone are set correctly
  2117. * with respect to min_free_kbytes.
  2118. */
  2119. void setup_per_zone_pages_min(void)
  2120. {
  2121. unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
  2122. unsigned long lowmem_pages = 0;
  2123. struct zone *zone;
  2124. unsigned long flags;
  2125. /* Calculate total number of !ZONE_HIGHMEM pages */
  2126. for_each_zone(zone) {
  2127. if (!is_highmem(zone))
  2128. lowmem_pages += zone->present_pages;
  2129. }
  2130. for_each_zone(zone) {
  2131. unsigned long tmp;
  2132. spin_lock_irqsave(&zone->lru_lock, flags);
  2133. tmp = (pages_min * zone->present_pages) / lowmem_pages;
  2134. if (is_highmem(zone)) {
  2135. /*
  2136. * __GFP_HIGH and PF_MEMALLOC allocations usually don't
  2137. * need highmem pages, so cap pages_min to a small
  2138. * value here.
  2139. *
  2140. * The (pages_high-pages_low) and (pages_low-pages_min)
2141. * deltas control async page reclaim, and so should
  2142. * not be capped for highmem.
  2143. */
  2144. int min_pages;
  2145. min_pages = zone->present_pages / 1024;
  2146. if (min_pages < SWAP_CLUSTER_MAX)
  2147. min_pages = SWAP_CLUSTER_MAX;
  2148. if (min_pages > 128)
  2149. min_pages = 128;
  2150. zone->pages_min = min_pages;
  2151. } else {
  2152. /*
  2153. * If it's a lowmem zone, reserve a number of pages
  2154. * proportionate to the zone's size.
  2155. */
  2156. zone->pages_min = tmp;
  2157. }
  2158. zone->pages_low = zone->pages_min + tmp / 4;
  2159. zone->pages_high = zone->pages_min + tmp / 2;
  2160. spin_unlock_irqrestore(&zone->lru_lock, flags);
  2161. }
  2162. }
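/*
 * Example, assuming PAGE_SHIFT == 12 and min_free_kbytes == 4096: that
 * is 1024 pages, spread across the lowmem zones in proportion to their
 * size; a lowmem zone holding all of lowmem would end up with
 * pages_min = 1024, pages_low = 1280 and pages_high = 1536.
 */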
  2163. /*
  2164. * Initialise min_free_kbytes.
  2165. *
  2166. * For small machines we want it small (128k min). For large machines
  2167. * we want it large (64MB max). But it is not linear, because network
  2168. * bandwidth does not increase linearly with machine size. We use
  2169. *
  2170. * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
  2171. * min_free_kbytes = sqrt(lowmem_kbytes * 16)
  2172. *
  2173. * which yields
  2174. *
  2175. * 16MB: 512k
  2176. * 32MB: 724k
  2177. * 64MB: 1024k
  2178. * 128MB: 1448k
  2179. * 256MB: 2048k
  2180. * 512MB: 2896k
  2181. * 1024MB: 4096k
  2182. * 2048MB: 5792k
  2183. * 4096MB: 8192k
  2184. * 8192MB: 11584k
  2185. * 16384MB: 16384k
  2186. */
  2187. static int __init init_per_zone_pages_min(void)
  2188. {
  2189. unsigned long lowmem_kbytes;
  2190. lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
  2191. min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
  2192. if (min_free_kbytes < 128)
  2193. min_free_kbytes = 128;
  2194. if (min_free_kbytes > 65536)
  2195. min_free_kbytes = 65536;
  2196. setup_per_zone_pages_min();
  2197. setup_per_zone_lowmem_reserve();
  2198. return 0;
  2199. }
  2200. module_init(init_per_zone_pages_min)
  2201. /*
  2202. * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
2203. * that we can recompute the per-zone watermarks whenever min_free_kbytes
2204. * changes.
  2205. */
  2206. int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
  2207. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  2208. {
  2209. proc_dointvec(table, write, file, buffer, length, ppos);
  2210. setup_per_zone_pages_min();
  2211. return 0;
  2212. }
  2213. /*
  2214. * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
  2215. * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
  2216. * whenever sysctl_lowmem_reserve_ratio changes.
  2217. *
  2218. * The reserve ratio obviously has absolutely no relation with the
2219. * pages_min watermarks. The lowmem reserve ratio is only meaningful
2220. * in relation to the boot-time zone sizes.
  2221. */
  2222. int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
  2223. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  2224. {
  2225. proc_dointvec_minmax(table, write, file, buffer, length, ppos);
  2226. setup_per_zone_lowmem_reserve();
  2227. return 0;
  2228. }
  2229. /*
  2230. * percpu_pagelist_fraction - changes the pcp->high for each zone on each
2231. * cpu. It is the fraction of total pages in each zone that a hot per-cpu
2232. * pagelist may hold before it gets flushed back to the buddy allocator.
  2233. */
  2234. int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
  2235. struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
  2236. {
  2237. struct zone *zone;
  2238. unsigned int cpu;
  2239. int ret;
  2240. ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
  2241. if (!write || (ret == -EINVAL))
  2242. return ret;
  2243. for_each_zone(zone) {
  2244. for_each_online_cpu(cpu) {
  2245. unsigned long high;
  2246. high = zone->present_pages / percpu_pagelist_fraction;
  2247. setup_pagelist_highmark(zone_pcp(zone, cpu), high);
  2248. }
  2249. }
  2250. return 0;
  2251. }
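/*
 * For instance, writing 8 to /proc/sys/vm/percpu_pagelist_fraction
 * limits each hot per-cpu pagelist to 1/8th of its zone (the exact
 * sysctl path depends on how the entry is registered in sysctl.c).
 */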
  2252. __initdata int hashdist = HASHDIST_DEFAULT;
  2253. #ifdef CONFIG_NUMA
  2254. static int __init set_hashdist(char *str)
  2255. {
  2256. if (!str)
  2257. return 0;
  2258. hashdist = simple_strtoul(str, &str, 0);
  2259. return 1;
  2260. }
  2261. __setup("hashdist=", set_hashdist);
  2262. #endif
  2263. /*
  2264. * allocate a large system hash table from bootmem
  2265. * - it is assumed that the hash table must contain an exact power-of-2
  2266. * quantity of entries
  2267. * - limit is the number of hash buckets, not the total allocation size
  2268. */
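/*
 * A typical caller (hypothetical names and values, for illustration)
 * sizes its table from the amount of memory and saves the resulting
 * shift and mask:
 *
 *	example_hash = alloc_large_system_hash("Example-cache",
 *					sizeof(struct hlist_head),
 *					0,		// size from memory
 *					14,		// 1 bucket per 16 KiB
 *					HASH_EARLY,
 *					&example_hash_shift,
 *					&example_hash_mask,
 *					0);
 */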
  2269. void *__init alloc_large_system_hash(const char *tablename,
  2270. unsigned long bucketsize,
  2271. unsigned long numentries,
  2272. int scale,
  2273. int flags,
  2274. unsigned int *_hash_shift,
  2275. unsigned int *_hash_mask,
  2276. unsigned long limit)
  2277. {
  2278. unsigned long long max = limit;
  2279. unsigned long log2qty, size;
  2280. void *table = NULL;
  2281. /* allow the kernel cmdline to have a say */
  2282. if (!numentries) {
  2283. /* round applicable memory size up to nearest megabyte */
  2284. numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
  2285. numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
  2286. numentries >>= 20 - PAGE_SHIFT;
  2287. numentries <<= 20 - PAGE_SHIFT;
  2288. /* limit to 1 bucket per 2^scale bytes of low memory */
  2289. if (scale > PAGE_SHIFT)
  2290. numentries >>= (scale - PAGE_SHIFT);
  2291. else
  2292. numentries <<= (PAGE_SHIFT - scale);
  2293. }
  2294. /* rounded up to nearest power of 2 in size */
  2295. numentries = 1UL << (long_log2(numentries) + 1);
  2296. /* limit allocation size to 1/16 total memory by default */
  2297. if (max == 0) {
  2298. max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
  2299. do_div(max, bucketsize);
  2300. }
  2301. if (numentries > max)
  2302. numentries = max;
  2303. log2qty = long_log2(numentries);
  2304. do {
  2305. size = bucketsize << log2qty;
  2306. if (flags & HASH_EARLY)
  2307. table = alloc_bootmem(size);
  2308. else if (hashdist)
  2309. table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
  2310. else {
  2311. unsigned long order;
  2312. for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
  2313. ;
  2314. table = (void*) __get_free_pages(GFP_ATOMIC, order);
  2315. }
  2316. } while (!table && size > PAGE_SIZE && --log2qty);
  2317. if (!table)
  2318. panic("Failed to allocate %s hash table\n", tablename);
  2319. printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
  2320. tablename,
  2321. (1U << log2qty),
  2322. long_log2(size) - PAGE_SHIFT,
  2323. size);
  2324. if (_hash_shift)
  2325. *_hash_shift = log2qty;
  2326. if (_hash_mask)
  2327. *_hash_mask = (1 << log2qty) - 1;
  2328. return table;
  2329. }