page_alloc.c 63 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492
  1. /*
  2. * linux/mm/page_alloc.c
  3. *
  4. * Manages the free list, the system allocates free pages here.
  5. * Note that kmalloc() lives in slab.c
  6. *
  7. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  8. * Swap reorganised 29.12.95, Stephen Tweedie
  9. * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  10. * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  11. * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  12. * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  13. * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  14. * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  15. */
  16. #include <linux/stddef.h>
  17. #include <linux/mm.h>
  18. #include <linux/swap.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/pagemap.h>
  21. #include <linux/bootmem.h>
  22. #include <linux/compiler.h>
  23. #include <linux/kernel.h>
  24. #include <linux/module.h>
  25. #include <linux/suspend.h>
  26. #include <linux/pagevec.h>
  27. #include <linux/blkdev.h>
  28. #include <linux/slab.h>
  29. #include <linux/notifier.h>
  30. #include <linux/topology.h>
  31. #include <linux/sysctl.h>
  32. #include <linux/cpu.h>
  33. #include <linux/cpuset.h>
  34. #include <linux/memory_hotplug.h>
  35. #include <linux/nodemask.h>
  36. #include <linux/vmalloc.h>
  37. #include <linux/mempolicy.h>
  38. #include <linux/stop_machine.h>
  39. #include <asm/tlbflush.h>
  40. #include <asm/div64.h>
  41. #include "internal.h"
  42. /*
  43. * MCD - HACK: Find somewhere to initialize this EARLY, or make this
  44. * initializer cleaner
  45. */
  46. nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
  47. EXPORT_SYMBOL(node_online_map);
  48. nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
  49. EXPORT_SYMBOL(node_possible_map);
  50. unsigned long totalram_pages __read_mostly;
  51. unsigned long totalreserve_pages __read_mostly;
  52. long nr_swap_pages;
  53. int percpu_pagelist_fraction;
  54. static void __free_pages_ok(struct page *page, unsigned int order);
  55. /*
  56. * results with 256, 32 in the lowmem_reserve sysctl:
  57. * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
  58. * 1G machine -> (16M dma, 784M normal, 224M high)
  59. * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
  60. * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
  61. * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
  62. *
  63. * TBD: should special case ZONE_DMA32 machines here - in those we normally
  64. * don't need any ZONE_NORMAL reservation
  65. */
  66. int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
  67. 256,
  68. #ifdef CONFIG_ZONE_DMA32
  69. 256,
  70. #endif
  71. #ifdef CONFIG_HIGHMEM
  72. 32
  73. #endif
  74. };
  75. EXPORT_SYMBOL(totalram_pages);
  76. /*
  77. * Used by page_zone() to look up the address of the struct zone whose
  78. * id is encoded in the upper bits of page->flags
  79. */
  80. struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
  81. EXPORT_SYMBOL(zone_table);
  82. static char *zone_names[MAX_NR_ZONES] = {
  83. "DMA",
  84. #ifdef CONFIG_ZONE_DMA32
  85. "DMA32",
  86. #endif
  87. "Normal",
  88. #ifdef CONFIG_HIGHMEM
  89. "HighMem"
  90. #endif
  91. };
  92. int min_free_kbytes = 1024;
  93. unsigned long __meminitdata nr_kernel_pages;
  94. unsigned long __meminitdata nr_all_pages;
  95. #ifdef CONFIG_DEBUG_VM
  96. static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
  97. {
  98. int ret = 0;
  99. unsigned seq;
  100. unsigned long pfn = page_to_pfn(page);
  101. do {
  102. seq = zone_span_seqbegin(zone);
  103. if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
  104. ret = 1;
  105. else if (pfn < zone->zone_start_pfn)
  106. ret = 1;
  107. } while (zone_span_seqretry(zone, seq));
  108. return ret;
  109. }
  110. static int page_is_consistent(struct zone *zone, struct page *page)
  111. {
  112. #ifdef CONFIG_HOLES_IN_ZONE
  113. if (!pfn_valid(page_to_pfn(page)))
  114. return 0;
  115. #endif
  116. if (zone != page_zone(page))
  117. return 0;
  118. return 1;
  119. }
  120. /*
  121. * Temporary debugging check for pages not lying within a given zone.
  122. */
  123. static int bad_range(struct zone *zone, struct page *page)
  124. {
  125. if (page_outside_zone_boundaries(zone, page))
  126. return 1;
  127. if (!page_is_consistent(zone, page))
  128. return 1;
  129. return 0;
  130. }
  131. #else
  132. static inline int bad_range(struct zone *zone, struct page *page)
  133. {
  134. return 0;
  135. }
  136. #endif
  137. static void bad_page(struct page *page)
  138. {
  139. printk(KERN_EMERG "Bad page state in process '%s'\n"
  140. KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
  141. KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
  142. KERN_EMERG "Backtrace:\n",
  143. current->comm, page, (int)(2*sizeof(unsigned long)),
  144. (unsigned long)page->flags, page->mapping,
  145. page_mapcount(page), page_count(page));
  146. dump_stack();
  147. page->flags &= ~(1 << PG_lru |
  148. 1 << PG_private |
  149. 1 << PG_locked |
  150. 1 << PG_active |
  151. 1 << PG_dirty |
  152. 1 << PG_reclaim |
  153. 1 << PG_slab |
  154. 1 << PG_swapcache |
  155. 1 << PG_writeback |
  156. 1 << PG_buddy );
  157. set_page_count(page, 0);
  158. reset_page_mapcount(page);
  159. page->mapping = NULL;
  160. add_taint(TAINT_BAD_PAGE);
  161. }
  162. /*
  163. * Higher-order pages are called "compound pages". They are structured thusly:
  164. *
  165. * The first PAGE_SIZE page is called the "head page".
  166. *
  167. * The remaining PAGE_SIZE pages are called "tail pages".
  168. *
  169. * All pages have PG_compound set. All pages have their ->private pointing at
  170. * the head page (even the head page has this).
  171. *
  172. * The first tail page's ->lru.next holds the address of the compound page's
  173. * put_page() function. Its ->lru.prev holds the order of allocation.
  174. * This usage means that zero-order pages may not be compound.
  175. */
  176. static void free_compound_page(struct page *page)
  177. {
  178. __free_pages_ok(page, (unsigned long)page[1].lru.prev);
  179. }
  180. static void prep_compound_page(struct page *page, unsigned long order)
  181. {
  182. int i;
  183. int nr_pages = 1 << order;
  184. page[1].lru.next = (void *)free_compound_page; /* set dtor */
  185. page[1].lru.prev = (void *)order;
  186. for (i = 0; i < nr_pages; i++) {
  187. struct page *p = page + i;
  188. __SetPageCompound(p);
  189. set_page_private(p, (unsigned long)page);
  190. }
  191. }
  192. static void destroy_compound_page(struct page *page, unsigned long order)
  193. {
  194. int i;
  195. int nr_pages = 1 << order;
  196. if (unlikely((unsigned long)page[1].lru.prev != order))
  197. bad_page(page);
  198. for (i = 0; i < nr_pages; i++) {
  199. struct page *p = page + i;
  200. if (unlikely(!PageCompound(p) |
  201. (page_private(p) != (unsigned long)page)))
  202. bad_page(page);
  203. __ClearPageCompound(p);
  204. }
  205. }
  206. static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
  207. {
  208. int i;
  209. VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
  210. /*
  211. * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
  212. * and __GFP_HIGHMEM from hard or soft interrupt context.
  213. */
  214. VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
  215. for (i = 0; i < (1 << order); i++)
  216. clear_highpage(page + i);
  217. }
  218. /*
  219. * function for dealing with page's order in buddy system.
  220. * zone->lock is already acquired when we use these.
  221. * So, we don't need atomic page->flags operations here.
  222. */
  223. static inline unsigned long page_order(struct page *page)
  224. {
  225. return page_private(page);
  226. }
  227. static inline void set_page_order(struct page *page, int order)
  228. {
  229. set_page_private(page, order);
  230. __SetPageBuddy(page);
  231. }
  232. static inline void rmv_page_order(struct page *page)
  233. {
  234. __ClearPageBuddy(page);
  235. set_page_private(page, 0);
  236. }
  237. /*
  238. * Locate the struct page for both the matching buddy in our
  239. * pair (buddy1) and the combined O(n+1) page they form (page).
  240. *
  241. * 1) Any buddy B1 will have an order O twin B2 which satisfies
  242. * the following equation:
  243. * B2 = B1 ^ (1 << O)
  244. * For example, if the starting buddy (buddy2) is #8 its order
  245. * 1 buddy is #10:
  246. * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
  247. *
  248. * 2) Any buddy B will have an order O+1 parent P which
  249. * satisfies the following equation:
  250. * P = B & ~(1 << O)
  251. *
  252. * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
  253. */
  254. static inline struct page *
  255. __page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
  256. {
  257. unsigned long buddy_idx = page_idx ^ (1 << order);
  258. return page + (buddy_idx - page_idx);
  259. }
  260. static inline unsigned long
  261. __find_combined_index(unsigned long page_idx, unsigned int order)
  262. {
  263. return (page_idx & ~(1 << order));
  264. }
  265. /*
  266. * This function checks whether a page is free && is the buddy
  267. * we can do coalesce a page and its buddy if
  268. * (a) the buddy is not in a hole &&
  269. * (b) the buddy is in the buddy system &&
  270. * (c) a page and its buddy have the same order &&
  271. * (d) a page and its buddy are in the same zone.
  272. *
  273. * For recording whether a page is in the buddy system, we use PG_buddy.
  274. * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
  275. *
  276. * For recording page's order, we use page_private(page).
  277. */
  278. static inline int page_is_buddy(struct page *page, struct page *buddy,
  279. int order)
  280. {
  281. #ifdef CONFIG_HOLES_IN_ZONE
  282. if (!pfn_valid(page_to_pfn(buddy)))
  283. return 0;
  284. #endif
  285. if (page_zone_id(page) != page_zone_id(buddy))
  286. return 0;
  287. if (PageBuddy(buddy) && page_order(buddy) == order) {
  288. BUG_ON(page_count(buddy) != 0);
  289. return 1;
  290. }
  291. return 0;
  292. }
  293. /*
  294. * Freeing function for a buddy system allocator.
  295. *
  296. * The concept of a buddy system is to maintain direct-mapped table
  297. * (containing bit values) for memory blocks of various "orders".
  298. * The bottom level table contains the map for the smallest allocatable
  299. * units of memory (here, pages), and each level above it describes
  300. * pairs of units from the levels below, hence, "buddies".
  301. * At a high level, all that happens here is marking the table entry
  302. * at the bottom level available, and propagating the changes upward
  303. * as necessary, plus some accounting needed to play nicely with other
  304. * parts of the VM system.
  305. * At each level, we keep a list of pages, which are heads of continuous
  306. * free pages of length of (1 << order) and marked with PG_buddy. Page's
  307. * order is recorded in page_private(page) field.
  308. * So when we are allocating or freeing one, we can derive the state of the
  309. * other. That is, if we allocate a small block, and both were
  310. * free, the remainder of the region must be split into blocks.
  311. * If a block is freed, and its buddy is also free, then this
  312. * triggers coalescing into a block of larger size.
  313. *
  314. * -- wli
  315. */
  316. static inline void __free_one_page(struct page *page,
  317. struct zone *zone, unsigned int order)
  318. {
  319. unsigned long page_idx;
  320. int order_size = 1 << order;
  321. if (unlikely(PageCompound(page)))
  322. destroy_compound_page(page, order);
  323. page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
  324. VM_BUG_ON(page_idx & (order_size - 1));
  325. VM_BUG_ON(bad_range(zone, page));
  326. zone->free_pages += order_size;
  327. while (order < MAX_ORDER-1) {
  328. unsigned long combined_idx;
  329. struct free_area *area;
  330. struct page *buddy;
  331. buddy = __page_find_buddy(page, page_idx, order);
  332. if (!page_is_buddy(page, buddy, order))
  333. break; /* Move the buddy up one level. */
  334. list_del(&buddy->lru);
  335. area = zone->free_area + order;
  336. area->nr_free--;
  337. rmv_page_order(buddy);
  338. combined_idx = __find_combined_index(page_idx, order);
  339. page = page + (combined_idx - page_idx);
  340. page_idx = combined_idx;
  341. order++;
  342. }
  343. set_page_order(page, order);
  344. list_add(&page->lru, &zone->free_area[order].free_list);
  345. zone->free_area[order].nr_free++;
  346. }
  347. static inline int free_pages_check(struct page *page)
  348. {
  349. if (unlikely(page_mapcount(page) |
  350. (page->mapping != NULL) |
  351. (page_count(page) != 0) |
  352. (page->flags & (
  353. 1 << PG_lru |
  354. 1 << PG_private |
  355. 1 << PG_locked |
  356. 1 << PG_active |
  357. 1 << PG_reclaim |
  358. 1 << PG_slab |
  359. 1 << PG_swapcache |
  360. 1 << PG_writeback |
  361. 1 << PG_reserved |
  362. 1 << PG_buddy ))))
  363. bad_page(page);
  364. if (PageDirty(page))
  365. __ClearPageDirty(page);
  366. /*
  367. * For now, we report if PG_reserved was found set, but do not
  368. * clear it, and do not free the page. But we shall soon need
  369. * to do more, for when the ZERO_PAGE count wraps negative.
  370. */
  371. return PageReserved(page);
  372. }
  373. /*
  374. * Frees a list of pages.
  375. * Assumes all pages on list are in same zone, and of same order.
  376. * count is the number of pages to free.
  377. *
  378. * If the zone was previously in an "all pages pinned" state then look to
  379. * see if this freeing clears that state.
  380. *
  381. * And clear the zone's pages_scanned counter, to hold off the "all pages are
  382. * pinned" detection logic.
  383. */
  384. static void free_pages_bulk(struct zone *zone, int count,
  385. struct list_head *list, int order)
  386. {
  387. spin_lock(&zone->lock);
  388. zone->all_unreclaimable = 0;
  389. zone->pages_scanned = 0;
  390. while (count--) {
  391. struct page *page;
  392. VM_BUG_ON(list_empty(list));
  393. page = list_entry(list->prev, struct page, lru);
  394. /* have to delete it as __free_one_page list manipulates */
  395. list_del(&page->lru);
  396. __free_one_page(page, zone, order);
  397. }
  398. spin_unlock(&zone->lock);
  399. }
  400. static void free_one_page(struct zone *zone, struct page *page, int order)
  401. {
  402. spin_lock(&zone->lock);
  403. zone->all_unreclaimable = 0;
  404. zone->pages_scanned = 0;
  405. __free_one_page(page, zone ,order);
  406. spin_unlock(&zone->lock);
  407. }
  408. static void __free_pages_ok(struct page *page, unsigned int order)
  409. {
  410. unsigned long flags;
  411. int i;
  412. int reserved = 0;
  413. arch_free_page(page, order);
  414. if (!PageHighMem(page))
  415. debug_check_no_locks_freed(page_address(page),
  416. PAGE_SIZE<<order);
  417. for (i = 0 ; i < (1 << order) ; ++i)
  418. reserved += free_pages_check(page + i);
  419. if (reserved)
  420. return;
  421. kernel_map_pages(page, 1 << order, 0);
  422. local_irq_save(flags);
  423. __count_vm_events(PGFREE, 1 << order);
  424. free_one_page(page_zone(page), page, order);
  425. local_irq_restore(flags);
  426. }
  427. /*
  428. * permit the bootmem allocator to evade page validation on high-order frees
  429. */
  430. void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
  431. {
  432. if (order == 0) {
  433. __ClearPageReserved(page);
  434. set_page_count(page, 0);
  435. set_page_refcounted(page);
  436. __free_page(page);
  437. } else {
  438. int loop;
  439. prefetchw(page);
  440. for (loop = 0; loop < BITS_PER_LONG; loop++) {
  441. struct page *p = &page[loop];
  442. if (loop + 1 < BITS_PER_LONG)
  443. prefetchw(p + 1);
  444. __ClearPageReserved(p);
  445. set_page_count(p, 0);
  446. }
  447. set_page_refcounted(page);
  448. __free_pages(page, order);
  449. }
  450. }
  451. /*
  452. * The order of subdivision here is critical for the IO subsystem.
  453. * Please do not alter this order without good reasons and regression
  454. * testing. Specifically, as large blocks of memory are subdivided,
  455. * the order in which smaller blocks are delivered depends on the order
  456. * they're subdivided in this function. This is the primary factor
  457. * influencing the order in which pages are delivered to the IO
  458. * subsystem according to empirical testing, and this is also justified
  459. * by considering the behavior of a buddy system containing a single
  460. * large block of memory acted on by a series of small allocations.
  461. * This behavior is a critical factor in sglist merging's success.
  462. *
  463. * -- wli
  464. */
  465. static inline void expand(struct zone *zone, struct page *page,
  466. int low, int high, struct free_area *area)
  467. {
  468. unsigned long size = 1 << high;
  469. while (high > low) {
  470. area--;
  471. high--;
  472. size >>= 1;
  473. VM_BUG_ON(bad_range(zone, &page[size]));
  474. list_add(&page[size].lru, &area->free_list);
  475. area->nr_free++;
  476. set_page_order(&page[size], high);
  477. }
  478. }
  479. /*
  480. * This page is about to be returned from the page allocator
  481. */
  482. static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  483. {
  484. if (unlikely(page_mapcount(page) |
  485. (page->mapping != NULL) |
  486. (page_count(page) != 0) |
  487. (page->flags & (
  488. 1 << PG_lru |
  489. 1 << PG_private |
  490. 1 << PG_locked |
  491. 1 << PG_active |
  492. 1 << PG_dirty |
  493. 1 << PG_reclaim |
  494. 1 << PG_slab |
  495. 1 << PG_swapcache |
  496. 1 << PG_writeback |
  497. 1 << PG_reserved |
  498. 1 << PG_buddy ))))
  499. bad_page(page);
  500. /*
  501. * For now, we report if PG_reserved was found set, but do not
  502. * clear it, and do not allocate the page: as a safety net.
  503. */
  504. if (PageReserved(page))
  505. return 1;
  506. page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
  507. 1 << PG_referenced | 1 << PG_arch_1 |
  508. 1 << PG_checked | 1 << PG_mappedtodisk);
  509. set_page_private(page, 0);
  510. set_page_refcounted(page);
  511. kernel_map_pages(page, 1 << order, 1);
  512. if (gfp_flags & __GFP_ZERO)
  513. prep_zero_page(page, order, gfp_flags);
  514. if (order && (gfp_flags & __GFP_COMP))
  515. prep_compound_page(page, order);
  516. return 0;
  517. }
  518. /*
  519. * Do the hard work of removing an element from the buddy allocator.
  520. * Call me with the zone->lock already held.
  521. */
  522. static struct page *__rmqueue(struct zone *zone, unsigned int order)
  523. {
  524. struct free_area * area;
  525. unsigned int current_order;
  526. struct page *page;
  527. for (current_order = order; current_order < MAX_ORDER; ++current_order) {
  528. area = zone->free_area + current_order;
  529. if (list_empty(&area->free_list))
  530. continue;
  531. page = list_entry(area->free_list.next, struct page, lru);
  532. list_del(&page->lru);
  533. rmv_page_order(page);
  534. area->nr_free--;
  535. zone->free_pages -= 1UL << order;
  536. expand(zone, page, order, current_order, area);
  537. return page;
  538. }
  539. return NULL;
  540. }
  541. /*
  542. * Obtain a specified number of elements from the buddy allocator, all under
  543. * a single hold of the lock, for efficiency. Add them to the supplied list.
  544. * Returns the number of new pages which were placed at *list.
  545. */
  546. static int rmqueue_bulk(struct zone *zone, unsigned int order,
  547. unsigned long count, struct list_head *list)
  548. {
  549. int i;
  550. spin_lock(&zone->lock);
  551. for (i = 0; i < count; ++i) {
  552. struct page *page = __rmqueue(zone, order);
  553. if (unlikely(page == NULL))
  554. break;
  555. list_add_tail(&page->lru, list);
  556. }
  557. spin_unlock(&zone->lock);
  558. return i;
  559. }
  560. #ifdef CONFIG_NUMA
  561. /*
  562. * Called from the slab reaper to drain pagesets on a particular node that
  563. * belongs to the currently executing processor.
  564. * Note that this function must be called with the thread pinned to
  565. * a single processor.
  566. */
  567. void drain_node_pages(int nodeid)
  568. {
  569. int i;
  570. enum zone_type z;
  571. unsigned long flags;
  572. for (z = 0; z < MAX_NR_ZONES; z++) {
  573. struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
  574. struct per_cpu_pageset *pset;
  575. if (!populated_zone(zone))
  576. continue;
  577. pset = zone_pcp(zone, smp_processor_id());
  578. for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
  579. struct per_cpu_pages *pcp;
  580. pcp = &pset->pcp[i];
  581. if (pcp->count) {
  582. local_irq_save(flags);
  583. free_pages_bulk(zone, pcp->count, &pcp->list, 0);
  584. pcp->count = 0;
  585. local_irq_restore(flags);
  586. }
  587. }
  588. }
  589. }
  590. #endif
  591. #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
  592. static void __drain_pages(unsigned int cpu)
  593. {
  594. unsigned long flags;
  595. struct zone *zone;
  596. int i;
  597. for_each_zone(zone) {
  598. struct per_cpu_pageset *pset;
  599. pset = zone_pcp(zone, cpu);
  600. for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
  601. struct per_cpu_pages *pcp;
  602. pcp = &pset->pcp[i];
  603. local_irq_save(flags);
  604. free_pages_bulk(zone, pcp->count, &pcp->list, 0);
  605. pcp->count = 0;
  606. local_irq_restore(flags);
  607. }
  608. }
  609. }
  610. #endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
  611. #ifdef CONFIG_PM
  612. void mark_free_pages(struct zone *zone)
  613. {
  614. unsigned long pfn, max_zone_pfn;
  615. unsigned long flags;
  616. int order;
  617. struct list_head *curr;
  618. if (!zone->spanned_pages)
  619. return;
  620. spin_lock_irqsave(&zone->lock, flags);
  621. max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
  622. for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
  623. if (pfn_valid(pfn)) {
  624. struct page *page = pfn_to_page(pfn);
  625. if (!PageNosave(page))
  626. ClearPageNosaveFree(page);
  627. }
  628. for (order = MAX_ORDER - 1; order >= 0; --order)
  629. list_for_each(curr, &zone->free_area[order].free_list) {
  630. unsigned long i;
  631. pfn = page_to_pfn(list_entry(curr, struct page, lru));
  632. for (i = 0; i < (1UL << order); i++)
  633. SetPageNosaveFree(pfn_to_page(pfn + i));
  634. }
  635. spin_unlock_irqrestore(&zone->lock, flags);
  636. }
  637. /*
  638. * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  639. */
  640. void drain_local_pages(void)
  641. {
  642. unsigned long flags;
  643. local_irq_save(flags);
  644. __drain_pages(smp_processor_id());
  645. local_irq_restore(flags);
  646. }
  647. #endif /* CONFIG_PM */
  648. /*
  649. * Free a 0-order page
  650. */
  651. static void fastcall free_hot_cold_page(struct page *page, int cold)
  652. {
  653. struct zone *zone = page_zone(page);
  654. struct per_cpu_pages *pcp;
  655. unsigned long flags;
  656. arch_free_page(page, 0);
  657. if (PageAnon(page))
  658. page->mapping = NULL;
  659. if (free_pages_check(page))
  660. return;
  661. kernel_map_pages(page, 1, 0);
  662. pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
  663. local_irq_save(flags);
  664. __count_vm_event(PGFREE);
  665. list_add(&page->lru, &pcp->list);
  666. pcp->count++;
  667. if (pcp->count >= pcp->high) {
  668. free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
  669. pcp->count -= pcp->batch;
  670. }
  671. local_irq_restore(flags);
  672. put_cpu();
  673. }
  674. void fastcall free_hot_page(struct page *page)
  675. {
  676. free_hot_cold_page(page, 0);
  677. }
  678. void fastcall free_cold_page(struct page *page)
  679. {
  680. free_hot_cold_page(page, 1);
  681. }
  682. /*
  683. * split_page takes a non-compound higher-order page, and splits it into
  684. * n (1<<order) sub-pages: page[0..n]
  685. * Each sub-page must be freed individually.
  686. *
  687. * Note: this is probably too low level an operation for use in drivers.
  688. * Please consult with lkml before using this in your driver.
  689. */
  690. void split_page(struct page *page, unsigned int order)
  691. {
  692. int i;
  693. VM_BUG_ON(PageCompound(page));
  694. VM_BUG_ON(!page_count(page));
  695. for (i = 1; i < (1 << order); i++)
  696. set_page_refcounted(page + i);
  697. }
  698. /*
  699. * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
  700. * we cheat by calling it from here, in the order > 0 path. Saves a branch
  701. * or two.
  702. */
  703. static struct page *buffered_rmqueue(struct zonelist *zonelist,
  704. struct zone *zone, int order, gfp_t gfp_flags)
  705. {
  706. unsigned long flags;
  707. struct page *page;
  708. int cold = !!(gfp_flags & __GFP_COLD);
  709. int cpu;
  710. again:
  711. cpu = get_cpu();
  712. if (likely(order == 0)) {
  713. struct per_cpu_pages *pcp;
  714. pcp = &zone_pcp(zone, cpu)->pcp[cold];
  715. local_irq_save(flags);
  716. if (!pcp->count) {
  717. pcp->count += rmqueue_bulk(zone, 0,
  718. pcp->batch, &pcp->list);
  719. if (unlikely(!pcp->count))
  720. goto failed;
  721. }
  722. page = list_entry(pcp->list.next, struct page, lru);
  723. list_del(&page->lru);
  724. pcp->count--;
  725. } else {
  726. spin_lock_irqsave(&zone->lock, flags);
  727. page = __rmqueue(zone, order);
  728. spin_unlock(&zone->lock);
  729. if (!page)
  730. goto failed;
  731. }
  732. __count_zone_vm_events(PGALLOC, zone, 1 << order);
  733. zone_statistics(zonelist, zone);
  734. local_irq_restore(flags);
  735. put_cpu();
  736. VM_BUG_ON(bad_range(zone, page));
  737. if (prep_new_page(page, order, gfp_flags))
  738. goto again;
  739. return page;
  740. failed:
  741. local_irq_restore(flags);
  742. put_cpu();
  743. return NULL;
  744. }
  745. #define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
  746. #define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */
  747. #define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */
  748. #define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */
  749. #define ALLOC_HARDER 0x10 /* try to alloc harder */
  750. #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
  751. #define ALLOC_CPUSET 0x40 /* check for correct cpuset */
  752. /*
  753. * Return 1 if free pages are above 'mark'. This takes into account the order
  754. * of the allocation.
  755. */
  756. int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
  757. int classzone_idx, int alloc_flags)
  758. {
  759. /* free_pages my go negative - that's OK */
  760. long min = mark, free_pages = z->free_pages - (1 << order) + 1;
  761. int o;
  762. if (alloc_flags & ALLOC_HIGH)
  763. min -= min / 2;
  764. if (alloc_flags & ALLOC_HARDER)
  765. min -= min / 4;
  766. if (free_pages <= min + z->lowmem_reserve[classzone_idx])
  767. return 0;
  768. for (o = 0; o < order; o++) {
  769. /* At the next order, this order's pages become unavailable */
  770. free_pages -= z->free_area[o].nr_free << o;
  771. /* Require fewer higher order pages to be free */
  772. min >>= 1;
  773. if (free_pages <= min)
  774. return 0;
  775. }
  776. return 1;
  777. }
  778. /*
  779. * get_page_from_freeliest goes through the zonelist trying to allocate
  780. * a page.
  781. */
  782. static struct page *
  783. get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
  784. struct zonelist *zonelist, int alloc_flags)
  785. {
  786. struct zone **z = zonelist->zones;
  787. struct page *page = NULL;
  788. int classzone_idx = zone_idx(*z);
  789. struct zone *zone;
  790. /*
  791. * Go through the zonelist once, looking for a zone with enough free.
  792. * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
  793. */
  794. do {
  795. zone = *z;
  796. if (unlikely((gfp_mask & __GFP_THISNODE) &&
  797. zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
  798. break;
  799. if ((alloc_flags & ALLOC_CPUSET) &&
  800. !cpuset_zone_allowed(zone, gfp_mask))
  801. continue;
  802. if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
  803. unsigned long mark;
  804. if (alloc_flags & ALLOC_WMARK_MIN)
  805. mark = zone->pages_min;
  806. else if (alloc_flags & ALLOC_WMARK_LOW)
  807. mark = zone->pages_low;
  808. else
  809. mark = zone->pages_high;
  810. if (!zone_watermark_ok(zone , order, mark,
  811. classzone_idx, alloc_flags))
  812. if (!zone_reclaim_mode ||
  813. !zone_reclaim(zone, gfp_mask, order))
  814. continue;
  815. }
  816. page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
  817. if (page) {
  818. break;
  819. }
  820. } while (*(++z) != NULL);
  821. return page;
  822. }
  823. /*
  824. * This is the 'heart' of the zoned buddy allocator.
  825. */
  826. struct page * fastcall
  827. __alloc_pages(gfp_t gfp_mask, unsigned int order,
  828. struct zonelist *zonelist)
  829. {
  830. const gfp_t wait = gfp_mask & __GFP_WAIT;
  831. struct zone **z;
  832. struct page *page;
  833. struct reclaim_state reclaim_state;
  834. struct task_struct *p = current;
  835. int do_retry;
  836. int alloc_flags;
  837. int did_some_progress;
  838. might_sleep_if(wait);
  839. restart:
  840. z = zonelist->zones; /* the list of zones suitable for gfp_mask */
  841. if (unlikely(*z == NULL)) {
  842. /* Should this ever happen?? */
  843. return NULL;
  844. }
  845. page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
  846. zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
  847. if (page)
  848. goto got_pg;
  849. do {
  850. wakeup_kswapd(*z, order);
  851. } while (*(++z));
  852. /*
  853. * OK, we're below the kswapd watermark and have kicked background
  854. * reclaim. Now things get more complex, so set up alloc_flags according
  855. * to how we want to proceed.
  856. *
  857. * The caller may dip into page reserves a bit more if the caller
  858. * cannot run direct reclaim, or if the caller has realtime scheduling
  859. * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
  860. * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
  861. */
  862. alloc_flags = ALLOC_WMARK_MIN;
  863. if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
  864. alloc_flags |= ALLOC_HARDER;
  865. if (gfp_mask & __GFP_HIGH)
  866. alloc_flags |= ALLOC_HIGH;
  867. if (wait)
  868. alloc_flags |= ALLOC_CPUSET;
  869. /*
  870. * Go through the zonelist again. Let __GFP_HIGH and allocations
  871. * coming from realtime tasks go deeper into reserves.
  872. *
  873. * This is the last chance, in general, before the goto nopage.
  874. * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
  875. * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
  876. */
  877. page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
  878. if (page)
  879. goto got_pg;
  880. /* This allocation should allow future memory freeing. */
  881. if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
  882. && !in_interrupt()) {
  883. if (!(gfp_mask & __GFP_NOMEMALLOC)) {
  884. nofail_alloc:
  885. /* go through the zonelist yet again, ignoring mins */
  886. page = get_page_from_freelist(gfp_mask, order,
  887. zonelist, ALLOC_NO_WATERMARKS);
  888. if (page)
  889. goto got_pg;
  890. if (gfp_mask & __GFP_NOFAIL) {
  891. blk_congestion_wait(WRITE, HZ/50);
  892. goto nofail_alloc;
  893. }
  894. }
  895. goto nopage;
  896. }
  897. /* Atomic allocations - we can't balance anything */
  898. if (!wait)
  899. goto nopage;
  900. rebalance:
  901. cond_resched();
  902. /* We now go into synchronous reclaim */
  903. cpuset_memory_pressure_bump();
  904. p->flags |= PF_MEMALLOC;
  905. reclaim_state.reclaimed_slab = 0;
  906. p->reclaim_state = &reclaim_state;
  907. did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
  908. p->reclaim_state = NULL;
  909. p->flags &= ~PF_MEMALLOC;
  910. cond_resched();
  911. if (likely(did_some_progress)) {
  912. page = get_page_from_freelist(gfp_mask, order,
  913. zonelist, alloc_flags);
  914. if (page)
  915. goto got_pg;
  916. } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
  917. /*
  918. * Go through the zonelist yet one more time, keep
  919. * very high watermark here, this is only to catch
  920. * a parallel oom killing, we must fail if we're still
  921. * under heavy pressure.
  922. */
  923. page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
  924. zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
  925. if (page)
  926. goto got_pg;
  927. out_of_memory(zonelist, gfp_mask, order);
  928. goto restart;
  929. }
  930. /*
  931. * Don't let big-order allocations loop unless the caller explicitly
  932. * requests that. Wait for some write requests to complete then retry.
  933. *
  934. * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
  935. * <= 3, but that may not be true in other implementations.
  936. */
  937. do_retry = 0;
  938. if (!(gfp_mask & __GFP_NORETRY)) {
  939. if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
  940. do_retry = 1;
  941. if (gfp_mask & __GFP_NOFAIL)
  942. do_retry = 1;
  943. }
  944. if (do_retry) {
  945. blk_congestion_wait(WRITE, HZ/50);
  946. goto rebalance;
  947. }
  948. nopage:
  949. if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
  950. printk(KERN_WARNING "%s: page allocation failure."
  951. " order:%d, mode:0x%x\n",
  952. p->comm, order, gfp_mask);
  953. dump_stack();
  954. show_mem();
  955. }
  956. got_pg:
  957. return page;
  958. }
  959. EXPORT_SYMBOL(__alloc_pages);
  960. /*
  961. * Common helper functions.
  962. */
  963. fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
  964. {
  965. struct page * page;
  966. page = alloc_pages(gfp_mask, order);
  967. if (!page)
  968. return 0;
  969. return (unsigned long) page_address(page);
  970. }
  971. EXPORT_SYMBOL(__get_free_pages);
  972. fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
  973. {
  974. struct page * page;
  975. /*
  976. * get_zeroed_page() returns a 32-bit address, which cannot represent
  977. * a highmem page
  978. */
  979. VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
  980. page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
  981. if (page)
  982. return (unsigned long) page_address(page);
  983. return 0;
  984. }
  985. EXPORT_SYMBOL(get_zeroed_page);
  986. void __pagevec_free(struct pagevec *pvec)
  987. {
  988. int i = pagevec_count(pvec);
  989. while (--i >= 0)
  990. free_hot_cold_page(pvec->pages[i], pvec->cold);
  991. }
  992. fastcall void __free_pages(struct page *page, unsigned int order)
  993. {
  994. if (put_page_testzero(page)) {
  995. if (order == 0)
  996. free_hot_page(page);
  997. else
  998. __free_pages_ok(page, order);
  999. }
  1000. }
  1001. EXPORT_SYMBOL(__free_pages);
  1002. fastcall void free_pages(unsigned long addr, unsigned int order)
  1003. {
  1004. if (addr != 0) {
  1005. VM_BUG_ON(!virt_addr_valid((void *)addr));
  1006. __free_pages(virt_to_page((void *)addr), order);
  1007. }
  1008. }
  1009. EXPORT_SYMBOL(free_pages);
  1010. /*
  1011. * Total amount of free (allocatable) RAM:
  1012. */
  1013. unsigned int nr_free_pages(void)
  1014. {
  1015. unsigned int sum = 0;
  1016. struct zone *zone;
  1017. for_each_zone(zone)
  1018. sum += zone->free_pages;
  1019. return sum;
  1020. }
  1021. EXPORT_SYMBOL(nr_free_pages);
  1022. #ifdef CONFIG_NUMA
  1023. unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
  1024. {
  1025. unsigned int sum = 0;
  1026. enum zone_type i;
  1027. for (i = 0; i < MAX_NR_ZONES; i++)
  1028. sum += pgdat->node_zones[i].free_pages;
  1029. return sum;
  1030. }
  1031. #endif
  1032. static unsigned int nr_free_zone_pages(int offset)
  1033. {
  1034. /* Just pick one node, since fallback list is circular */
  1035. pg_data_t *pgdat = NODE_DATA(numa_node_id());
  1036. unsigned int sum = 0;
  1037. struct zonelist *zonelist = pgdat->node_zonelists + offset;
  1038. struct zone **zonep = zonelist->zones;
  1039. struct zone *zone;
  1040. for (zone = *zonep++; zone; zone = *zonep++) {
  1041. unsigned long size = zone->present_pages;
  1042. unsigned long high = zone->pages_high;
  1043. if (size > high)
  1044. sum += size - high;
  1045. }
  1046. return sum;
  1047. }
  1048. /*
  1049. * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
  1050. */
  1051. unsigned int nr_free_buffer_pages(void)
  1052. {
  1053. return nr_free_zone_pages(gfp_zone(GFP_USER));
  1054. }
  1055. /*
  1056. * Amount of free RAM allocatable within all zones
  1057. */
  1058. unsigned int nr_free_pagecache_pages(void)
  1059. {
  1060. return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
  1061. }
  1062. #ifdef CONFIG_NUMA
  1063. static void show_node(struct zone *zone)
  1064. {
  1065. printk("Node %ld ", zone_to_nid(zone));
  1066. }
  1067. #else
  1068. #define show_node(zone) do { } while (0)
  1069. #endif
  1070. void si_meminfo(struct sysinfo *val)
  1071. {
  1072. val->totalram = totalram_pages;
  1073. val->sharedram = 0;
  1074. val->freeram = nr_free_pages();
  1075. val->bufferram = nr_blockdev_pages();
  1076. val->totalhigh = totalhigh_pages;
  1077. val->freehigh = nr_free_highpages();
  1078. val->mem_unit = PAGE_SIZE;
  1079. }
  1080. EXPORT_SYMBOL(si_meminfo);
  1081. #ifdef CONFIG_NUMA
  1082. void si_meminfo_node(struct sysinfo *val, int nid)
  1083. {
  1084. pg_data_t *pgdat = NODE_DATA(nid);
  1085. val->totalram = pgdat->node_present_pages;
  1086. val->freeram = nr_free_pages_pgdat(pgdat);
  1087. #ifdef CONFIG_HIGHMEM
  1088. val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
  1089. val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
  1090. #else
  1091. val->totalhigh = 0;
  1092. val->freehigh = 0;
  1093. #endif
  1094. val->mem_unit = PAGE_SIZE;
  1095. }
  1096. #endif
  1097. #define K(x) ((x) << (PAGE_SHIFT-10))
  1098. /*
  1099. * Show free area list (used inside shift_scroll-lock stuff)
  1100. * We also calculate the percentage fragmentation. We do this by counting the
  1101. * memory on each free list with the exception of the first item on the list.
  1102. */
  1103. void show_free_areas(void)
  1104. {
  1105. int cpu, temperature;
  1106. unsigned long active;
  1107. unsigned long inactive;
  1108. unsigned long free;
  1109. struct zone *zone;
  1110. for_each_zone(zone) {
  1111. show_node(zone);
  1112. printk("%s per-cpu:", zone->name);
  1113. if (!populated_zone(zone)) {
  1114. printk(" empty\n");
  1115. continue;
  1116. } else
  1117. printk("\n");
  1118. for_each_online_cpu(cpu) {
  1119. struct per_cpu_pageset *pageset;
  1120. pageset = zone_pcp(zone, cpu);
  1121. for (temperature = 0; temperature < 2; temperature++)
  1122. printk("cpu %d %s: high %d, batch %d used:%d\n",
  1123. cpu,
  1124. temperature ? "cold" : "hot",
  1125. pageset->pcp[temperature].high,
  1126. pageset->pcp[temperature].batch,
  1127. pageset->pcp[temperature].count);
  1128. }
  1129. }
  1130. get_zone_counts(&active, &inactive, &free);
  1131. printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
  1132. "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
  1133. active,
  1134. inactive,
  1135. global_page_state(NR_FILE_DIRTY),
  1136. global_page_state(NR_WRITEBACK),
  1137. global_page_state(NR_UNSTABLE_NFS),
  1138. nr_free_pages(),
  1139. global_page_state(NR_SLAB_RECLAIMABLE) +
  1140. global_page_state(NR_SLAB_UNRECLAIMABLE),
  1141. global_page_state(NR_FILE_MAPPED),
  1142. global_page_state(NR_PAGETABLE));
  1143. for_each_zone(zone) {
  1144. int i;
  1145. show_node(zone);
  1146. printk("%s"
  1147. " free:%lukB"
  1148. " min:%lukB"
  1149. " low:%lukB"
  1150. " high:%lukB"
  1151. " active:%lukB"
  1152. " inactive:%lukB"
  1153. " present:%lukB"
  1154. " pages_scanned:%lu"
  1155. " all_unreclaimable? %s"
  1156. "\n",
  1157. zone->name,
  1158. K(zone->free_pages),
  1159. K(zone->pages_min),
  1160. K(zone->pages_low),
  1161. K(zone->pages_high),
  1162. K(zone->nr_active),
  1163. K(zone->nr_inactive),
  1164. K(zone->present_pages),
  1165. zone->pages_scanned,
  1166. (zone->all_unreclaimable ? "yes" : "no")
  1167. );
  1168. printk("lowmem_reserve[]:");
  1169. for (i = 0; i < MAX_NR_ZONES; i++)
  1170. printk(" %lu", zone->lowmem_reserve[i]);
  1171. printk("\n");
  1172. }
  1173. for_each_zone(zone) {
  1174. unsigned long nr[MAX_ORDER], flags, order, total = 0;
  1175. show_node(zone);
  1176. printk("%s: ", zone->name);
  1177. if (!populated_zone(zone)) {
  1178. printk("empty\n");
  1179. continue;
  1180. }
  1181. spin_lock_irqsave(&zone->lock, flags);
  1182. for (order = 0; order < MAX_ORDER; order++) {
  1183. nr[order] = zone->free_area[order].nr_free;
  1184. total += nr[order] << order;
  1185. }
  1186. spin_unlock_irqrestore(&zone->lock, flags);
  1187. for (order = 0; order < MAX_ORDER; order++)
  1188. printk("%lu*%lukB ", nr[order], K(1UL) << order);
  1189. printk("= %lukB\n", K(total));
  1190. }
  1191. show_swap_cache_info();
  1192. }
  1193. /*
  1194. * Builds allocation fallback zone lists.
  1195. *
  1196. * Add all populated zones of a node to the zonelist.
  1197. */
  1198. static int __meminit build_zonelists_node(pg_data_t *pgdat,
  1199. struct zonelist *zonelist, int nr_zones, enum zone_type zone_type)
  1200. {
  1201. struct zone *zone;
  1202. BUG_ON(zone_type >= MAX_NR_ZONES);
  1203. zone_type++;
  1204. do {
  1205. zone_type--;
  1206. zone = pgdat->node_zones + zone_type;
  1207. if (populated_zone(zone)) {
  1208. zonelist->zones[nr_zones++] = zone;
  1209. check_highest_zone(zone_type);
  1210. }
  1211. } while (zone_type);
  1212. return nr_zones;
  1213. }
  1214. #ifdef CONFIG_NUMA
  1215. #define MAX_NODE_LOAD (num_online_nodes())
  1216. static int __meminitdata node_load[MAX_NUMNODES];
  1217. /**
  1218. * find_next_best_node - find the next node that should appear in a given node's fallback list
  1219. * @node: node whose fallback list we're appending
  1220. * @used_node_mask: nodemask_t of already used nodes
  1221. *
  1222. * We use a number of factors to determine which is the next node that should
  1223. * appear on a given node's fallback list. The node should not have appeared
  1224. * already in @node's fallback list, and it should be the next closest node
  1225. * according to the distance array (which contains arbitrary distance values
  1226. * from each node to each node in the system), and should also prefer nodes
  1227. * with no CPUs, since presumably they'll have very little allocation pressure
  1228. * on them otherwise.
  1229. * It returns -1 if no node is found.
  1230. */
  1231. static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask)
  1232. {
  1233. int n, val;
  1234. int min_val = INT_MAX;
  1235. int best_node = -1;
  1236. /* Use the local node if we haven't already */
  1237. if (!node_isset(node, *used_node_mask)) {
  1238. node_set(node, *used_node_mask);
  1239. return node;
  1240. }
  1241. for_each_online_node(n) {
  1242. cpumask_t tmp;
  1243. /* Don't want a node to appear more than once */
  1244. if (node_isset(n, *used_node_mask))
  1245. continue;
  1246. /* Use the distance array to find the distance */
  1247. val = node_distance(node, n);
  1248. /* Penalize nodes under us ("prefer the next node") */
  1249. val += (n < node);
  1250. /* Give preference to headless and unused nodes */
  1251. tmp = node_to_cpumask(n);
  1252. if (!cpus_empty(tmp))
  1253. val += PENALTY_FOR_NODE_WITH_CPUS;
  1254. /* Slight preference for less loaded node */
  1255. val *= (MAX_NODE_LOAD*MAX_NUMNODES);
  1256. val += node_load[n];
  1257. if (val < min_val) {
  1258. min_val = val;
  1259. best_node = n;
  1260. }
  1261. }
  1262. if (best_node >= 0)
  1263. node_set(best_node, *used_node_mask);
  1264. return best_node;
  1265. }
  1266. static void __meminit build_zonelists(pg_data_t *pgdat)
  1267. {
  1268. int j, node, local_node;
  1269. enum zone_type i;
  1270. int prev_node, load;
  1271. struct zonelist *zonelist;
  1272. nodemask_t used_mask;
  1273. /* initialize zonelists */
  1274. for (i = 0; i < MAX_NR_ZONES; i++) {
  1275. zonelist = pgdat->node_zonelists + i;
  1276. zonelist->zones[0] = NULL;
  1277. }
  1278. /* NUMA-aware ordering of nodes */
  1279. local_node = pgdat->node_id;
  1280. load = num_online_nodes();
  1281. prev_node = local_node;
  1282. nodes_clear(used_mask);
  1283. while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
  1284. int distance = node_distance(local_node, node);
  1285. /*
  1286. * If another node is sufficiently far away then it is better
  1287. * to reclaim pages in a zone before going off node.
  1288. */
  1289. if (distance > RECLAIM_DISTANCE)
  1290. zone_reclaim_mode = 1;
  1291. /*
  1292. * We don't want to pressure a particular node.
  1293. * So adding penalty to the first node in same
  1294. * distance group to make it round-robin.
  1295. */
  1296. if (distance != node_distance(local_node, prev_node))
  1297. node_load[node] += load;
  1298. prev_node = node;
  1299. load--;
  1300. for (i = 0; i < MAX_NR_ZONES; i++) {
  1301. zonelist = pgdat->node_zonelists + i;
  1302. for (j = 0; zonelist->zones[j] != NULL; j++);
  1303. j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
  1304. zonelist->zones[j] = NULL;
  1305. }
  1306. }
  1307. }
  1308. #else /* CONFIG_NUMA */
  1309. static void __meminit build_zonelists(pg_data_t *pgdat)
  1310. {
  1311. int node, local_node;
  1312. enum zone_type i,j;
  1313. local_node = pgdat->node_id;
  1314. for (i = 0; i < MAX_NR_ZONES; i++) {
  1315. struct zonelist *zonelist;
  1316. zonelist = pgdat->node_zonelists + i;
  1317. j = build_zonelists_node(pgdat, zonelist, 0, i);
  1318. /*
  1319. * Now we build the zonelist so that it contains the zones
  1320. * of all the other nodes.
  1321. * We don't want to pressure a particular node, so when
  1322. * building the zones for node N, we make sure that the
  1323. * zones coming right after the local ones are those from
  1324. * node N+1 (modulo N)
  1325. */
  1326. for (node = local_node + 1; node < MAX_NUMNODES; node++) {
  1327. if (!node_online(node))
  1328. continue;
  1329. j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
  1330. }
  1331. for (node = 0; node < local_node; node++) {
  1332. if (!node_online(node))
  1333. continue;
  1334. j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
  1335. }
  1336. zonelist->zones[j] = NULL;
  1337. }
  1338. }
  1339. #endif /* CONFIG_NUMA */
  1340. /* return values int ....just for stop_machine_run() */
  1341. static int __meminit __build_all_zonelists(void *dummy)
  1342. {
  1343. int nid;
  1344. for_each_online_node(nid)
  1345. build_zonelists(NODE_DATA(nid));
  1346. return 0;
  1347. }
  1348. void __meminit build_all_zonelists(void)
  1349. {
  1350. if (system_state == SYSTEM_BOOTING) {
  1351. __build_all_zonelists(0);
  1352. cpuset_init_current_mems_allowed();
  1353. } else {
  1354. /* we have to stop all cpus to guaranntee there is no user
  1355. of zonelist */
  1356. stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
  1357. /* cpuset refresh routine should be here */
  1358. }
  1359. vm_total_pages = nr_free_pagecache_pages();
  1360. printk("Built %i zonelists. Total pages: %ld\n",
  1361. num_online_nodes(), vm_total_pages);
  1362. }
/*
 * Helper functions to size the waitqueue hash table.
 * Essentially these want to choose hash table sizes sufficiently
 * large so that collisions trying to wait on pages are rare.
 * But in fact, the number of active page waitqueues on typical
 * systems is ridiculously low, less than 200. So this is even
 * conservative, even though it seems large.
 *
 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 * waitqueues, i.e. the size of the waitq table given the number of pages.
 */
#define PAGES_PER_WAITQUEUE     256

#ifndef CONFIG_MEMORY_HOTPLUG
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
        unsigned long size = 1;

        pages /= PAGES_PER_WAITQUEUE;

        while (size < pages)
                size <<= 1;

        /*
         * Once we have dozens or even hundreds of threads sleeping
         * on IO we've got bigger problems than wait queue collision.
         * Limit the size of the wait table to a reasonable size.
         */
        size = min(size, 4096UL);

        return max(size, 4UL);
}
#else
/*
 * A zone's size might be changed by hot-add, so it is not possible to
 * determine a suitable size for its wait_table.  So we use the maximum
 * size now.
 *
 * The max wait table size = 4096 x sizeof(wait_queue_head_t), ie:
 *
 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
 *
 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
 * or more by the traditional way. (See above).  It equals:
 *
 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
 *    powerpc (64K page size)             : = (32G +16M)byte.
 */
static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
        return 4096UL;
}
#endif
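/*
 * Illustrative note (not part of the original source): for a 1 GB zone of
 * 4K pages (262144 pages), the boot-time formula above gives
 * 262144 / PAGES_PER_WAITQUEUE = 1024, already a power of two and within
 * the [4, 4096] clamp, so the zone gets a 1024-entry wait table.
 */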
/*
 * This is an integer logarithm so that shifts can be used later
 * to extract the more random high bits from the multiplicative
 * hash function before the remainder is taken.
 */
static inline unsigned long wait_table_bits(unsigned long size)
{
        return ffz(~size);
}
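/*
 * Illustrative note (not part of the original source): wait_table_bits()
 * relies on size being a power of two, so ffz(~size) is simply log2(size);
 * e.g. a 1024-entry wait table yields 10 bits for the hash shift.
 */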
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
                unsigned long *zones_size, unsigned long *zholes_size)
{
        unsigned long realtotalpages, totalpages = 0;
        enum zone_type i;

        for (i = 0; i < MAX_NR_ZONES; i++)
                totalpages += zones_size[i];
        pgdat->node_spanned_pages = totalpages;

        realtotalpages = totalpages;
        if (zholes_size)
                for (i = 0; i < MAX_NR_ZONES; i++)
                        realtotalpages -= zholes_size[i];
        pgdat->node_present_pages = realtotalpages;
        printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}

/*
 * Initially all pages are reserved - free ones are freed
 * up by free_all_bootmem() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 */
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                unsigned long start_pfn)
{
        struct page *page;
        unsigned long end_pfn = start_pfn + size;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                if (!early_pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
                set_page_links(page, zone, nid, pfn);
                init_page_count(page);
                reset_page_mapcount(page);
                SetPageReserved(page);
                INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
                /* The shift won't overflow because ZONE_NORMAL is below 4G. */
                if (!is_highmem_idx(zone))
                        set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
        }
}
void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
                                unsigned long size)
{
        int order;

        for (order = 0; order < MAX_ORDER; order++) {
                INIT_LIST_HEAD(&zone->free_area[order].free_list);
                zone->free_area[order].nr_free = 0;
        }
}

#define ZONETABLE_INDEX(x, zone_nr)     ((x << ZONES_SHIFT) | zone_nr)
void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
                   unsigned long pfn, unsigned long size)
{
        unsigned long snum = pfn_to_section_nr(pfn);
        unsigned long end = pfn_to_section_nr(pfn + size);

        if (FLAGS_HAS_NODE)
                zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
        else
                for (; snum <= end; snum++)
                        zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
}

#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
        memmap_init_zone((size), (nid), (zone), (start_pfn))
#endif
static int __cpuinit zone_batchsize(struct zone *zone)
{
        int batch;

        /*
         * The per-cpu-pages pools are set to around 1000th of the
         * size of the zone.  But no more than 1/2 of a meg.
         *
         * OK, so we don't know how big the cache is.  So guess.
         */
        batch = zone->present_pages / 1024;
        if (batch * PAGE_SIZE > 512 * 1024)
                batch = (512 * 1024) / PAGE_SIZE;
        batch /= 4;             /* We effectively *= 4 below */
        if (batch < 1)
                batch = 1;

        /*
         * Clamp the batch to a 2^n - 1 value. Having a power
         * of 2 value was found to be more likely to have
         * suboptimal cache aliasing properties in some cases.
         *
         * For example if 2 tasks are alternately allocating
         * batches of pages, one task can end up with a lot
         * of pages of one half of the possible page colors
         * and the other with pages of the other colors.
         */
        batch = (1 << (fls(batch + batch/2)-1)) - 1;

        return batch;
}
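/*
 * Illustrative note (not part of the original source), walking through
 * zone_batchsize() for a 1 GB zone of 4K pages (262144 pages):
 * 262144 / 1024 = 256 pages would exceed the 512K byte cap, so batch
 * becomes (512 * 1024) / 4096 = 128, then 128 / 4 = 32, and the final
 * 2^n - 1 clamp gives (1 << (fls(48) - 1)) - 1 = 31.
 */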
inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
        struct per_cpu_pages *pcp;

        memset(p, 0, sizeof(*p));

        pcp = &p->pcp[0];               /* hot */
        pcp->count = 0;
        pcp->high = 6 * batch;
        pcp->batch = max(1UL, 1 * batch);
        INIT_LIST_HEAD(&pcp->list);

        pcp = &p->pcp[1];               /* cold */
        pcp->count = 0;
        pcp->high = 2 * batch;
        pcp->batch = max(1UL, batch/2);
        INIT_LIST_HEAD(&pcp->list);
}
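/*
 * Illustrative note (not part of the original source): with a batch of 31
 * (as in the example above), the hot list drains back to the buddy
 * allocator once it holds 6 * 31 = 186 pages and moves 31 pages at a time,
 * while the cold list is capped at 62 pages with a batch of 15.
 */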
/*
 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
 * to the value high for the pageset p.
 */
static void setup_pagelist_highmark(struct per_cpu_pageset *p,
                                unsigned long high)
{
        struct per_cpu_pages *pcp;

        pcp = &p->pcp[0]; /* hot list */
        pcp->high = high;
        pcp->batch = max(1UL, high/4);
        if ((high/4) > (PAGE_SHIFT * 8))
                pcp->batch = PAGE_SHIFT * 8;
}
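/*
 * Illustrative note (not part of the original source): for a high water
 * mark of 32768 pages, high/4 = 8192 exceeds PAGE_SHIFT * 8 (96 with 4K
 * pages), so the batch is capped at 96 pages per transfer.
 */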
#ifdef CONFIG_NUMA
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * Some NUMA counter updates may also be caught by the boot pagesets.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static struct per_cpu_pageset boot_pageset[NR_CPUS];

/*
 * Dynamically allocate memory for the
 * per cpu pageset array in struct zone.
 */
static int __cpuinit process_zones(int cpu)
{
        struct zone *zone, *dzone;

        for_each_zone(zone) {
                zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
                                        GFP_KERNEL, cpu_to_node(cpu));
                if (!zone_pcp(zone, cpu))
                        goto bad;

                setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));

                if (percpu_pagelist_fraction)
                        setup_pagelist_highmark(zone_pcp(zone, cpu),
                                (zone->present_pages / percpu_pagelist_fraction));
        }

        return 0;
bad:
        for_each_zone(dzone) {
                if (dzone == zone)
                        break;
                kfree(zone_pcp(dzone, cpu));
                zone_pcp(dzone, cpu) = NULL;
        }
        return -ENOMEM;
}
static inline void free_zone_pagesets(int cpu)
{
        struct zone *zone;

        for_each_zone(zone) {
                struct per_cpu_pageset *pset = zone_pcp(zone, cpu);

                /* Free per_cpu_pageset if it is slab allocated */
                if (pset != &boot_pageset[cpu])
                        kfree(pset);
                zone_pcp(zone, cpu) = NULL;
        }
}

static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
                unsigned long action,
                void *hcpu)
{
        int cpu = (long)hcpu;
        int ret = NOTIFY_OK;

        switch (action) {
        case CPU_UP_PREPARE:
                if (process_zones(cpu))
                        ret = NOTIFY_BAD;
                break;
        case CPU_UP_CANCELED:
        case CPU_DEAD:
                free_zone_pagesets(cpu);
                break;
        default:
                break;
        }
        return ret;
}

static struct notifier_block __cpuinitdata pageset_notifier =
        { &pageset_cpuup_callback, NULL, 0 };

void __init setup_per_cpu_pageset(void)
{
        int err;

        /* Initialize per_cpu_pageset for cpu 0.
         * A cpuup callback will do this for every cpu
         * as it comes online
         */
        err = process_zones(smp_processor_id());
        BUG_ON(err);
        register_cpu_notifier(&pageset_notifier);
}

#endif
static __meminit
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
        int i;
        struct pglist_data *pgdat = zone->zone_pgdat;
        size_t alloc_size;

        /*
         * The per-page waitqueue mechanism uses hashed waitqueues
         * per zone.
         */
        zone->wait_table_hash_nr_entries =
                 wait_table_hash_nr_entries(zone_size_pages);
        zone->wait_table_bits =
                wait_table_bits(zone->wait_table_hash_nr_entries);
        alloc_size = zone->wait_table_hash_nr_entries
                                        * sizeof(wait_queue_head_t);

        if (system_state == SYSTEM_BOOTING) {
                zone->wait_table = (wait_queue_head_t *)
                        alloc_bootmem_node(pgdat, alloc_size);
        } else {
                /*
                 * This case means that a zone whose size was 0 gets new memory
                 * via memory hot-add.
                 * But it may be the case that a new node was hot-added.  In
                 * this case vmalloc() will not be able to use this new node's
                 * memory - this wait_table must be initialized to use this new
                 * node itself as well.
                 * To use this new node's memory, further consideration will be
                 * necessary.
                 */
                zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size);
        }
        if (!zone->wait_table)
                return -ENOMEM;

        for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
                init_waitqueue_head(zone->wait_table + i);

        return 0;
}
static __meminit void zone_pcp_init(struct zone *zone)
{
        int cpu;
        unsigned long batch = zone_batchsize(zone);

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
#ifdef CONFIG_NUMA
                /* Early boot. Slab allocator not functional yet */
                zone_pcp(zone, cpu) = &boot_pageset[cpu];
                setup_pageset(&boot_pageset[cpu], 0);
#else
                setup_pageset(zone_pcp(zone, cpu), batch);
#endif
        }
        if (zone->present_pages)
                printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
                        zone->name, zone->present_pages, batch);
}
__meminit int init_currently_empty_zone(struct zone *zone,
                                        unsigned long zone_start_pfn,
                                        unsigned long size)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int ret;

        ret = zone_wait_table_init(zone, size);
        if (ret)
                return ret;
        pgdat->nr_zones = zone_idx(zone) + 1;

        zone->zone_start_pfn = zone_start_pfn;

        memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);

        zone_init_free_lists(pgdat, zone, zone->spanned_pages);

        return 0;
}
/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
static void __meminit free_area_init_core(struct pglist_data *pgdat,
                unsigned long *zones_size, unsigned long *zholes_size)
{
        enum zone_type j;
        int nid = pgdat->node_id;
        unsigned long zone_start_pfn = pgdat->node_start_pfn;
        int ret;

        pgdat_resize_init(pgdat);
        pgdat->nr_zones = 0;
        init_waitqueue_head(&pgdat->kswapd_wait);
        pgdat->kswapd_max_order = 0;

        for (j = 0; j < MAX_NR_ZONES; j++) {
                struct zone *zone = pgdat->node_zones + j;
                unsigned long size, realsize;

                realsize = size = zones_size[j];
                if (zholes_size)
                        realsize -= zholes_size[j];

                if (!is_highmem_idx(j))
                        nr_kernel_pages += realsize;
                nr_all_pages += realsize;

                zone->spanned_pages = size;
                zone->present_pages = realsize;
#ifdef CONFIG_NUMA
                zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
                                                / 100;
                zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
#endif
                zone->name = zone_names[j];
                spin_lock_init(&zone->lock);
                spin_lock_init(&zone->lru_lock);
                zone_seqlock_init(zone);
                zone->zone_pgdat = pgdat;
                zone->free_pages = 0;

                zone->temp_priority = zone->prev_priority = DEF_PRIORITY;

                zone_pcp_init(zone);
                INIT_LIST_HEAD(&zone->active_list);
                INIT_LIST_HEAD(&zone->inactive_list);
                zone->nr_scan_active = 0;
                zone->nr_scan_inactive = 0;
                zone->nr_active = 0;
                zone->nr_inactive = 0;
                zap_zone_vm_stats(zone);
                atomic_set(&zone->reclaim_in_progress, 0);
                if (!size)
                        continue;

                zonetable_add(zone, nid, j, zone_start_pfn, size);
                ret = init_currently_empty_zone(zone, zone_start_pfn, size);
                BUG_ON(ret);
                zone_start_pfn += size;
        }
}
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
        /* Skip empty nodes */
        if (!pgdat->node_spanned_pages)
                return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
        /* ia64 gets its own node_mem_map, before this, without bootmem */
        if (!pgdat->node_mem_map) {
                unsigned long size, start, end;
                struct page *map;

                /*
                 * The zone's endpoints aren't required to be MAX_ORDER
                 * aligned but the node_mem_map endpoints must be in order
                 * for the buddy allocator to function correctly.
                 */
                start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
                end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
                end = ALIGN(end, MAX_ORDER_NR_PAGES);
                size = (end - start) * sizeof(struct page);
                map = alloc_remap(pgdat->node_id, size);
                if (!map)
                        map = alloc_bootmem_node(pgdat, size);
                pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
        }
#ifdef CONFIG_FLATMEM
        /*
         * With no DISCONTIG, the global mem_map is just set as node 0's
         */
        if (pgdat == NODE_DATA(0))
                mem_map = NODE_DATA(0)->node_mem_map;
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}
void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
                unsigned long *zones_size, unsigned long node_start_pfn,
                unsigned long *zholes_size)
{
        pgdat->node_id = nid;
        pgdat->node_start_pfn = node_start_pfn;
        calculate_zone_totalpages(pgdat, zones_size, zholes_size);

        alloc_node_mem_map(pgdat);

        free_area_init_core(pgdat, zones_size, zholes_size);
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
static bootmem_data_t contig_bootmem_data;
struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };

EXPORT_SYMBOL(contig_page_data);
#endif

void __init free_area_init(unsigned long *zones_size)
{
        free_area_init_node(0, NODE_DATA(0), zones_size,
                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU
static int page_alloc_cpu_notify(struct notifier_block *self,
                                 unsigned long action, void *hcpu)
{
        int cpu = (unsigned long)hcpu;

        if (action == CPU_DEAD) {
                local_irq_disable();
                __drain_pages(cpu);
                vm_events_fold_cpu(cpu);
                local_irq_enable();
                refresh_cpu_vm_stats(cpu);
        }
        return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

void __init page_alloc_init(void)
{
        hotcpu_notifier(page_alloc_cpu_notify, 0);
}
/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
        struct pglist_data *pgdat;
        unsigned long reserve_pages = 0;
        enum zone_type i, j;

        for_each_online_pgdat(pgdat) {
                for (i = 0; i < MAX_NR_ZONES; i++) {
                        struct zone *zone = pgdat->node_zones + i;
                        unsigned long max = 0;

                        /* Find valid and maximum lowmem_reserve in the zone */
                        for (j = i; j < MAX_NR_ZONES; j++) {
                                if (zone->lowmem_reserve[j] > max)
                                        max = zone->lowmem_reserve[j];
                        }

                        /* we treat pages_high as reserved pages. */
                        max += zone->pages_high;

                        if (max > zone->present_pages)
                                max = zone->present_pages;
                        reserve_pages += max;
                }
        }
        totalreserve_pages = reserve_pages;
}
/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
        struct pglist_data *pgdat;
        enum zone_type j, idx;

        for_each_online_pgdat(pgdat) {
                for (j = 0; j < MAX_NR_ZONES; j++) {
                        struct zone *zone = pgdat->node_zones + j;
                        unsigned long present_pages = zone->present_pages;

                        zone->lowmem_reserve[j] = 0;

                        idx = j;
                        while (idx) {
                                struct zone *lower_zone;

                                idx--;

                                if (sysctl_lowmem_reserve_ratio[idx] < 1)
                                        sysctl_lowmem_reserve_ratio[idx] = 1;

                                lower_zone = pgdat->node_zones + idx;
                                lower_zone->lowmem_reserve[j] = present_pages /
                                        sysctl_lowmem_reserve_ratio[idx];
                                present_pages += lower_zone->present_pages;
                        }
                }
        }

        /* update totalreserve_pages */
        calculate_totalreserve_pages();
}
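/*
 * Illustrative note (not part of the original source): assuming the
 * commonly used default ratio of 32 for ZONE_NORMAL and a ZONE_HIGHMEM of
 * 1048576 pages, ZONE_NORMAL ends up with
 * lowmem_reserve[ZONE_HIGHMEM] = 1048576 / 32 = 32768 pages (128 MB with
 * 4K pages), i.e. that many normal-zone pages are kept off-limits to
 * allocations that could have been satisfied from highmem.
 */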
/*
 * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures
 *	that the pages_{min,low,high} values for each zone are set correctly
 *	with respect to min_free_kbytes.
 */
void setup_per_zone_pages_min(void)
{
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long lowmem_pages = 0;
        struct zone *zone;
        unsigned long flags;

        /* Calculate total number of !ZONE_HIGHMEM pages */
        for_each_zone(zone) {
                if (!is_highmem(zone))
                        lowmem_pages += zone->present_pages;
        }

        for_each_zone(zone) {
                u64 tmp;

                spin_lock_irqsave(&zone->lru_lock, flags);
                tmp = (u64)pages_min * zone->present_pages;
                do_div(tmp, lowmem_pages);
                if (is_highmem(zone)) {
                        /*
                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
                         * need highmem pages, so cap pages_min to a small
                         * value here.
                         *
                         * The (pages_high - pages_low) and (pages_low - pages_min)
                         * deltas control async page reclaim, and so should
                         * not be capped for highmem.
                         */
                        int min_pages;

                        min_pages = zone->present_pages / 1024;
                        if (min_pages < SWAP_CLUSTER_MAX)
                                min_pages = SWAP_CLUSTER_MAX;
                        if (min_pages > 128)
                                min_pages = 128;
                        zone->pages_min = min_pages;
                } else {
                        /*
                         * If it's a lowmem zone, reserve a number of pages
                         * proportionate to the zone's size.
                         */
                        zone->pages_min = tmp;
                }

                zone->pages_low  = zone->pages_min + (tmp >> 2);
                zone->pages_high = zone->pages_min + (tmp >> 1);
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }

        /* update totalreserve_pages */
        calculate_totalreserve_pages();
}
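/*
 * Illustrative note (not part of the original source): with
 * min_free_kbytes = 4096 and 4K pages, pages_min is 4096 >> 2 = 1024 pages,
 * spread across the lowmem zones in proportion to their size.  A lowmem
 * zone holding all of that memory would get pages_min = 1024,
 * pages_low = 1024 + 256 = 1280 and pages_high = 1024 + 512 = 1536.
 */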
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
static int __init init_per_zone_pages_min(void)
{
        unsigned long lowmem_kbytes;

        lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

        min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
        if (min_free_kbytes < 128)
                min_free_kbytes = 128;
        if (min_free_kbytes > 65536)
                min_free_kbytes = 65536;
        setup_per_zone_pages_min();
        setup_per_zone_lowmem_reserve();
        return 0;
}
module_init(init_per_zone_pages_min)
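/*
 * Illustrative note (not part of the original source): the "1024MB: 4096k"
 * row in the table above follows directly from the formula: 1 GB of lowmem
 * is 1048576 kB, and int_sqrt(1048576 * 16) = int_sqrt(16777216) = 4096.
 */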
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec(table, write, file, buffer, length, ppos);
        setup_per_zone_pages_min();
        return 0;
}

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        struct zone *zone;
        int rc;

        rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
        if (rc)
                return rc;

        for_each_zone(zone)
                zone->min_unmapped_pages = (zone->present_pages *
                                sysctl_min_unmapped_ratio) / 100;
        return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        struct zone *zone;
        int rc;

        rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
        if (rc)
                return rc;

        for_each_zone(zone)
                zone->min_slab_pages = (zone->present_pages *
                                sysctl_min_slab_ratio) / 100;
        return 0;
}
#endif
/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the pages_min watermarks.  The
 * lowmem reserve ratio only makes sense as a function of the boot-time
 * zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_minmax(table, write, file, buffer, length, ppos);
        setup_per_zone_lowmem_reserve();
        return 0;
}
/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        struct zone *zone;
        unsigned int cpu;
        int ret;

        ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
        if (!write || (ret == -EINVAL))
                return ret;
        for_each_zone(zone) {
                for_each_online_cpu(cpu) {
                        unsigned long high;
                        high = zone->present_pages / percpu_pagelist_fraction;
                        setup_pagelist_highmark(zone_pcp(zone, cpu), high);
                }
        }
        return 0;
}
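/*
 * Illustrative note (not part of the original source): writing 8 to the
 * percpu_pagelist_fraction sysctl caps each CPU's hot pagelist at 1/8 of
 * its zone; for a 262144-page zone that is high = 32768 pages, with the
 * batch then limited to 96 pages by setup_pagelist_highmark() above.
 */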
int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
        if (!str)
                return 0;
        hashdist = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("hashdist=", set_hashdist);
#endif
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
                                     int scale,
                                     int flags,
                                     unsigned int *_hash_shift,
                                     unsigned int *_hash_mask,
                                     unsigned long limit)
{
        unsigned long long max = limit;
        unsigned long log2qty, size;
        void *table = NULL;

        /* allow the kernel cmdline to have a say */
        if (!numentries) {
                /* round applicable memory size up to nearest megabyte */
                numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
                numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
                numentries >>= 20 - PAGE_SHIFT;
                numentries <<= 20 - PAGE_SHIFT;

                /* limit to 1 bucket per 2^scale bytes of low memory */
                if (scale > PAGE_SHIFT)
                        numentries >>= (scale - PAGE_SHIFT);
                else
                        numentries <<= (PAGE_SHIFT - scale);
        }
        numentries = roundup_pow_of_two(numentries);

        /* limit allocation size to 1/16 total memory by default */
        if (max == 0) {
                max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
                do_div(max, bucketsize);
        }

        if (numentries > max)
                numentries = max;

        log2qty = long_log2(numentries);

        do {
                size = bucketsize << log2qty;
                if (flags & HASH_EARLY)
                        table = alloc_bootmem(size);
                else if (hashdist)
                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
                else {
                        unsigned long order;
                        for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
                                ;
                        table = (void*) __get_free_pages(GFP_ATOMIC, order);
                }
        } while (!table && size > PAGE_SIZE && --log2qty);

        if (!table)
                panic("Failed to allocate %s hash table\n", tablename);

        printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
               tablename,
               (1U << log2qty),
               long_log2(size) - PAGE_SHIFT,
               size);

        if (_hash_shift)
                *_hash_shift = log2qty;
        if (_hash_mask)
                *_hash_mask = (1 << log2qty) - 1;

        return table;
}
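/*
 * Illustrative note (not part of the original source): suppose a caller
 * passes numentries == 0 and scale == 15 on a machine with 262144 pages of
 * 4K lowmem.  The size is first rounded to a whole number of megabytes
 * (262144 pages already is), then scaled to one bucket per 2^15 bytes:
 * 262144 >> (15 - 12) = 32768 entries, already a power of two, so
 * log2qty = 15 and the table occupies bucketsize << 15 bytes.
 */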
#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
struct page *pfn_to_page(unsigned long pfn)
{
        return __pfn_to_page(pfn);
}
unsigned long page_to_pfn(struct page *page)
{
        return __page_to_pfn(page);
}
EXPORT_SYMBOL(pfn_to_page);
EXPORT_SYMBOL(page_to_pfn);
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */