/* mm/bootmem.c */
  1. /*
  2. * bootmem - A boot-time physical memory allocator and configurator
  3. *
  4. * Copyright (C) 1999 Ingo Molnar
  5. * 1999 Kanoj Sarcar, SGI
  6. * 2008 Johannes Weiner
  7. *
  8. * Access to this subsystem has to be serialized externally (which is true
  9. * for the boot process anyway).
  10. */
  11. #include <linux/init.h>
  12. #include <linux/pfn.h>
  13. #include <linux/bootmem.h>
  14. #include <linux/module.h>
  15. #include <asm/bug.h>
  16. #include <asm/io.h>
  17. #include <asm/processor.h>
  18. #include "internal.h"
/* Highest page frame number managed as directly-mapped low memory. */
unsigned long max_low_pfn;
/* Lowest usable page frame number. */
unsigned long min_low_pfn;
/* Highest page frame number present in the system. */
unsigned long max_pfn;

/* All registered nodes, kept sorted by node_boot_start (see link_bootmem()). */
static LIST_HEAD(bdata_list);

#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

/* Per-node bootmem bookkeeping; discarded after boot (__initdata). */
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
  31. /*
  32. * Given an initialised bdata, it returns the size of the boot bitmap
  33. */
  34. static unsigned long __init get_mapsize(bootmem_data_t *bdata)
  35. {
  36. unsigned long mapsize;
  37. unsigned long start = PFN_DOWN(bdata->node_boot_start);
  38. unsigned long end = bdata->node_low_pfn;
  39. mapsize = ((end - start) + 7) / 8;
  40. return ALIGN(mapsize, sizeof(long));
  41. }
  42. /**
  43. * bootmem_bootmap_pages - calculate bitmap size in pages
  44. * @pages: number of pages the bitmap has to represent
  45. */
  46. unsigned long __init bootmem_bootmap_pages(unsigned long pages)
  47. {
  48. unsigned long mapsize;
  49. mapsize = (pages+7)/8;
  50. mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
  51. mapsize >>= PAGE_SHIFT;
  52. return mapsize;
  53. }
  54. /*
  55. * link bdata in order
  56. */
  57. static void __init link_bootmem(bootmem_data_t *bdata)
  58. {
  59. bootmem_data_t *ent;
  60. if (list_empty(&bdata_list)) {
  61. list_add(&bdata->list, &bdata_list);
  62. return;
  63. }
  64. /* insert in order */
  65. list_for_each_entry(ent, &bdata_list, list) {
  66. if (bdata->node_boot_start < ent->node_boot_start) {
  67. list_add_tail(&bdata->list, &ent->list);
  68. return;
  69. }
  70. }
  71. list_add_tail(&bdata->list, &bdata_list);
  72. }
/*
 * Called once per node to set up the allocator itself.
 *
 * @mapstart: pfn where the bitmap will live
 * @start:    first pfn of the node
 * @end:      first pfn after the node
 *
 * Returns the bitmap size in bytes.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	/* clamp [start, end) to what the memory model can represent */
	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_boot_start = PFN_PHYS(start);
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	/* get_mapsize() reads the fields assigned above */
	mapsize = get_mapsize(bdata);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	return mapsize;
}
/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}
/**
 * init_bootmem - register boot memory (single-node / UMA setup)
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Registers node 0 covering pfns [0, @pages) and records the global
 * min/max low pfn bounds.
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
/*
 * Hand all unreserved pages of a node over to the buddy allocator,
 * then free the bootmem bitmap itself.
 *
 * In the bitmap a set bit means "reserved"; ~map[] therefore has a set
 * bit per FREE page.
 *
 * Returns the number of pages released.
 */
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	struct page *page;
	unsigned long pfn;
	unsigned long i, count;
	unsigned long idx;
	unsigned long *map;
	int gofast = 0;

	BUG_ON(!bdata->node_bootmem_map);

	count = 0;
	/* first extant page of the node */
	pfn = PFN_DOWN(bdata->node_boot_start);
	idx = bdata->node_low_pfn - pfn;
	map = bdata->node_bootmem_map;
	/*
	 * Check if we are aligned to BITS_PER_LONG pages. If so, we might
	 * be able to free page orders of that size at once.
	 */
	if (!(pfn & (BITS_PER_LONG-1)))
		gofast = 1;
	for (i = 0; i < idx; ) {
		/* v: one set bit per free page in this word of the bitmap */
		unsigned long v = ~map[i / BITS_PER_LONG];

		if (gofast && v == ~0UL) {
			int order;

			/* whole word free and pfn-aligned: free one high-order block */
			page = pfn_to_page(pfn);
			count += BITS_PER_LONG;
			order = ffs(BITS_PER_LONG) - 1;
			__free_pages_bootmem(page, order);
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		} else if (v) {
			unsigned long m;

			/* partially free word: free page by page
			 * (m overflows to 0 after BITS_PER_LONG shifts) */
			page = pfn_to_page(pfn);
			for (m = 1; m && i < idx; m<<=1, page++, i++) {
				if (v & m) {
					count++;
					__free_pages_bootmem(page, 0);
				}
			}
		} else {
			/* fully reserved word: skip it */
			i += BITS_PER_LONG;
		}
		/* every branch above consumed exactly BITS_PER_LONG pfns */
		pfn += BITS_PER_LONG;
	}

	/*
	 * Now free the allocator bitmap itself, it's not
	 * needed anymore:
	 */
	page = virt_to_page(bdata->node_bootmem_map);
	idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
	for (i = 0; i < idx; i++, page++)
		__free_pages_bootmem(page, 0);
	count += i;
	bdata->node_bootmem_map = NULL;

	return count;
}
/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	/* record bootmem metadata for memory hotplug before releasing */
	register_page_bootmem_info_node(pgdat);
	return free_all_bootmem_core(pgdat->bdata);
}
/**
 * free_all_bootmem - release free pages to the buddy allocator (UMA)
 *
 * Operates on node 0 only.
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	return free_all_bootmem_core(NODE_DATA(0)->bdata);
}
/*
 * Mark the pages of [addr, addr + size) free in a node's bitmap.
 *
 * Partial pages at either edge stay reserved: the start index is
 * rounded UP and the end index DOWN.  BUGs if a page in the range was
 * already free (double free).
 */
static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
		unsigned long size)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out range */
	if (addr + size < bdata->node_boot_start ||
		PFN_DOWN(addr) > bdata->node_low_pfn)
		return;
	/*
	 * round down end of usable mem, partially free pages are
	 * considered reserved.
	 */

	/* freeing below the last successful allocation: let the next
	 * scan start from here again */
	if (addr >= bdata->node_boot_start && addr < bdata->last_success)
		bdata->last_success = addr;

	/*
	 * Round up to index to the range.
	 */
	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	for (i = sidx; i < eidx; i++) {
		/* a clear bit here means a double free */
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}
/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on (free_bootmem_core() ignores out-of-node ranges).
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	bootmem_data_t *bdata;

	list_for_each_entry(bdata, &bdata_list, list)
		free_bootmem_core(bdata, addr, size);
}
  258. /*
  259. * Marks a particular physical memory range as unallocatable. Usable RAM
  260. * might be used for boot-time allocations - or it might get added
  261. * to the free page pool later on.
  262. */
  263. static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
  264. unsigned long addr, unsigned long size, int flags)
  265. {
  266. unsigned long sidx, eidx;
  267. unsigned long i;
  268. BUG_ON(!size);
  269. /* out of range, don't hold other */
  270. if (addr + size < bdata->node_boot_start ||
  271. PFN_DOWN(addr) > bdata->node_low_pfn)
  272. return 0;
  273. /*
  274. * Round up to index to the range.
  275. */
  276. if (addr > bdata->node_boot_start)
  277. sidx= PFN_DOWN(addr - bdata->node_boot_start);
  278. else
  279. sidx = 0;
  280. eidx = PFN_UP(addr + size - bdata->node_boot_start);
  281. if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
  282. eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
  283. for (i = sidx; i < eidx; i++) {
  284. if (test_bit(i, bdata->node_bootmem_map)) {
  285. if (flags & BOOTMEM_EXCLUSIVE)
  286. return -EBUSY;
  287. }
  288. }
  289. return 0;
  290. }
  291. static void __init reserve_bootmem_core(bootmem_data_t *bdata,
  292. unsigned long addr, unsigned long size, int flags)
  293. {
  294. unsigned long sidx, eidx;
  295. unsigned long i;
  296. BUG_ON(!size);
  297. /* out of range */
  298. if (addr + size < bdata->node_boot_start ||
  299. PFN_DOWN(addr) > bdata->node_low_pfn)
  300. return;
  301. /*
  302. * Round up to index to the range.
  303. */
  304. if (addr > bdata->node_boot_start)
  305. sidx= PFN_DOWN(addr - bdata->node_boot_start);
  306. else
  307. sidx = 0;
  308. eidx = PFN_UP(addr + size - bdata->node_boot_start);
  309. if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
  310. eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
  311. for (i = sidx; i < eidx; i++) {
  312. if (test_and_set_bit(i, bdata->node_bootmem_map)) {
  313. #ifdef CONFIG_DEBUG_BOOTMEM
  314. printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
  315. #endif
  316. }
  317. }
  318. }
  319. /**
  320. * reserve_bootmem_node - mark a page range as reserved
  321. * @pgdat: node the range resides on
  322. * @physaddr: starting address of the range
  323. * @size: size of the range in bytes
  324. * @flags: reservation flags (see linux/bootmem.h)
  325. *
  326. * Partial pages will be reserved.
  327. *
  328. * Only physical pages that actually reside on @pgdat are marked.
  329. */
  330. int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
  331. unsigned long size, int flags)
  332. {
  333. int ret;
  334. ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
  335. if (ret < 0)
  336. return -ENOMEM;
  337. reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
  338. return 0;
  339. }
  340. #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
  341. /**
  342. * reserve_bootmem - mark a page range as usable
  343. * @addr: starting address of the range
  344. * @size: size of the range in bytes
  345. * @flags: reservation flags (see linux/bootmem.h)
  346. *
  347. * Partial pages will be reserved.
  348. *
  349. * All physical pages within the range are marked, no matter what
  350. * node they reside on.
  351. */
  352. int __init reserve_bootmem(unsigned long addr, unsigned long size,
  353. int flags)
  354. {
  355. bootmem_data_t *bdata;
  356. int ret;
  357. list_for_each_entry(bdata, &bdata_list, list) {
  358. ret = can_reserve_bootmem_core(bdata, addr, size, flags);
  359. if (ret < 0)
  360. return ret;
  361. }
  362. list_for_each_entry(bdata, &bdata_list, list)
  363. reserve_bootmem_core(bdata, addr, size, flags);
  364. return 0;
  365. }
  366. #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE: This function is _not_ reentrant.
 */
static void * __init
alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
		unsigned long align, unsigned long goal, unsigned long limit)
{
	unsigned long areasize, preferred;
	unsigned long i, start = 0, incr, eidx, end_pfn;
	void *ret;
	unsigned long node_boot_start;
	void *node_bootmem_map;

	if (!size) {
		printk("alloc_bootmem_core(): zero-sized request\n");
		BUG();
	}
	BUG_ON(align & (align-1));

	/* on nodes without memory - bootmem_map is NULL */
	if (!bdata->node_bootmem_map)
		return NULL;

	/* bdata->node_boot_start is supposed to be (12+6)bits alignment on x86_64 ? */
	node_boot_start = bdata->node_boot_start;
	node_bootmem_map = bdata->node_bootmem_map;
	if (align) {
		/*
		 * Scan relative to an align-rounded base so bit index i
		 * maps to an aligned physical address; skip the bitmap
		 * words covering the pages below the rounded base.
		 */
		node_boot_start = ALIGN(bdata->node_boot_start, align);
		if (node_boot_start > bdata->node_boot_start)
			node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
			    PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
	}

	if (limit && node_boot_start >= limit)
		return NULL;

	/* clamp the scan to the node end and the caller's upper limit */
	end_pfn = bdata->node_low_pfn;
	limit = PFN_DOWN(limit);
	if (limit && end_pfn > limit)
		end_pfn = limit;

	/* eidx: number of scannable bitmap bits from node_boot_start */
	eidx = end_pfn - PFN_DOWN(node_boot_start);

	/*
	 * We try to allocate bootmem pages above 'goal'
	 * first, then we try to allocate lower pages.
	 */
	preferred = 0;
	if (goal && PFN_DOWN(goal) < end_pfn) {
		if (goal > node_boot_start)
			preferred = goal - node_boot_start;

		/* resume from the last successful allocation when it
		 * lies above goal and below the limit */
		if (bdata->last_success > node_boot_start &&
			bdata->last_success - node_boot_start >= preferred)
			if (!limit || (limit && limit > bdata->last_success))
				preferred = bdata->last_success - node_boot_start;
	}

	/* convert byte offset to a page-index start; sizes to page counts */
	preferred = PFN_DOWN(ALIGN(preferred, align));
	areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
	/* step between candidate start indices (GNU ?: default of 1) */
	incr = align >> PAGE_SHIFT ? : 1;

restart_scan:
	for (i = preferred; i < eidx;) {
		unsigned long j;

		/* next free, properly aligned candidate start page */
		i = find_next_zero_bit(node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (i >= eidx)
			break;
		if (test_bit(i, node_bootmem_map)) {
			/* aligning forward landed on a reserved page */
			i += incr;
			continue;
		}
		/* verify the whole areasize run is free */
		for (j = i + 1; j < i + areasize; ++j) {
			if (j >= eidx)
				goto fail_block;
			if (test_bit(j, node_bootmem_map))
				goto fail_block;
		}
		start = i;
		goto found;
	fail_block:
		/* skip past the reserved page that broke the run */
		i = ALIGN(j, incr);
		if (i == j)
			i += incr;
	}

	/* nothing above goal: retry once from the bottom of the node */
	if (preferred > 0) {
		preferred = 0;
		goto restart_scan;
	}
	return NULL;

found:
	bdata->last_success = PFN_PHYS(start) + node_boot_start;
	BUG_ON(start >= eidx);

	/*
	 * Is the next page of the previous allocation-end the start
	 * of this allocation's buffer? If yes then we can 'merge'
	 * the previous partial page with this allocation.
	 */
	if (align < PAGE_SIZE &&
	    bdata->last_offset && bdata->last_pos+1 == start) {
		unsigned long offset, remaining_size;
		offset = ALIGN(bdata->last_offset, align);
		BUG_ON(offset > PAGE_SIZE);
		remaining_size = PAGE_SIZE - offset;
		if (size < remaining_size) {
			/* fits entirely in the previous partial page */
			areasize = 0;
			/* last_pos unchanged */
			bdata->last_offset = offset + size;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset + node_boot_start);
		} else {
			/* consume the partial page plus whole new pages */
			remaining_size = size - remaining_size;
			areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset + node_boot_start);
			bdata->last_pos = start + areasize - 1;
			bdata->last_offset = remaining_size;
		}
		bdata->last_offset &= ~PAGE_MASK;
	} else {
		bdata->last_pos = start + areasize - 1;
		bdata->last_offset = size & ~PAGE_MASK;
		ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
	}

	/*
	 * Reserve the area now:
	 */
	for (i = start; i < start + areasize; i++)
		if (unlikely(test_and_set_bit(i, node_bootmem_map)))
			BUG();
	memset(ret, 0, size);
	return ret;
}
  501. /**
  502. * __alloc_bootmem_nopanic - allocate boot memory without panicking
  503. * @size: size of the request in bytes
  504. * @align: alignment of the region
  505. * @goal: preferred starting address of the region
  506. *
  507. * The goal is dropped if it can not be satisfied and the allocation will
  508. * fall back to memory below @goal.
  509. *
  510. * Allocation may happen on any node in the system.
  511. *
  512. * Returns NULL on failure.
  513. */
  514. void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
  515. unsigned long goal)
  516. {
  517. bootmem_data_t *bdata;
  518. void *ptr;
  519. list_for_each_entry(bdata, &bdata_list, list) {
  520. ptr = alloc_bootmem_core(bdata, size, align, goal, 0);
  521. if (ptr)
  522. return ptr;
  523. }
  524. return NULL;
  525. }
  526. /**
  527. * __alloc_bootmem - allocate boot memory
  528. * @size: size of the request in bytes
  529. * @align: alignment of the region
  530. * @goal: preferred starting address of the region
  531. *
  532. * The goal is dropped if it can not be satisfied and the allocation will
  533. * fall back to memory below @goal.
  534. *
  535. * Allocation may happen on any node in the system.
  536. *
  537. * The function panics if the request can not be satisfied.
  538. */
  539. void * __init __alloc_bootmem(unsigned long size, unsigned long align,
  540. unsigned long goal)
  541. {
  542. void *mem = __alloc_bootmem_nopanic(size,align,goal);
  543. if (mem)
  544. return mem;
  545. /*
  546. * Whoops, we cannot satisfy the allocation request.
  547. */
  548. printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
  549. panic("Out of memory");
  550. return NULL;
  551. }
  552. /**
  553. * __alloc_bootmem_node - allocate boot memory from a specific node
  554. * @pgdat: node to allocate from
  555. * @size: size of the request in bytes
  556. * @align: alignment of the region
  557. * @goal: preferred starting address of the region
  558. *
  559. * The goal is dropped if it can not be satisfied and the allocation will
  560. * fall back to memory below @goal.
  561. *
  562. * Allocation may fall back to any node in the system if the specified node
  563. * can not hold the requested memory.
  564. *
  565. * The function panics if the request can not be satisfied.
  566. */
  567. void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
  568. unsigned long align, unsigned long goal)
  569. {
  570. void *ptr;
  571. ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
  572. if (ptr)
  573. return ptr;
  574. return __alloc_bootmem(size, align, goal);
  575. }
  576. #ifdef CONFIG_SPARSEMEM
  577. /**
  578. * alloc_bootmem_section - allocate boot memory from a specific section
  579. * @size: size of the request in bytes
  580. * @section_nr: sparse map section to allocate from
  581. *
  582. * Return NULL on failure.
  583. */
  584. void * __init alloc_bootmem_section(unsigned long size,
  585. unsigned long section_nr)
  586. {
  587. void *ptr;
  588. unsigned long limit, goal, start_nr, end_nr, pfn;
  589. struct pglist_data *pgdat;
  590. pfn = section_nr_to_pfn(section_nr);
  591. goal = PFN_PHYS(pfn);
  592. limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
  593. pgdat = NODE_DATA(early_pfn_to_nid(pfn));
  594. ptr = alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
  595. limit);
  596. if (!ptr)
  597. return NULL;
  598. start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
  599. end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));
  600. if (start_nr != section_nr || end_nr != section_nr) {
  601. printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
  602. section_nr);
  603. free_bootmem_core(pgdat->bdata, __pa(ptr), size);
  604. ptr = NULL;
  605. }
  606. return ptr;
  607. }
  608. #endif
  609. void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
  610. unsigned long align, unsigned long goal)
  611. {
  612. void *ptr;
  613. ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
  614. if (ptr)
  615. return ptr;
  616. return __alloc_bootmem_nopanic(size, align, goal);
  617. }
  618. #ifndef ARCH_LOW_ADDRESS_LIMIT
  619. #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
  620. #endif
  621. /**
  622. * __alloc_bootmem_low - allocate low boot memory
  623. * @size: size of the request in bytes
  624. * @align: alignment of the region
  625. * @goal: preferred starting address of the region
  626. *
  627. * The goal is dropped if it can not be satisfied and the allocation will
  628. * fall back to memory below @goal.
  629. *
  630. * Allocation may happen on any node in the system.
  631. *
  632. * The function panics if the request can not be satisfied.
  633. */
  634. void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
  635. unsigned long goal)
  636. {
  637. bootmem_data_t *bdata;
  638. void *ptr;
  639. list_for_each_entry(bdata, &bdata_list, list) {
  640. ptr = alloc_bootmem_core(bdata, size, align, goal,
  641. ARCH_LOW_ADDRESS_LIMIT);
  642. if (ptr)
  643. return ptr;
  644. }
  645. /*
  646. * Whoops, we cannot satisfy the allocation request.
  647. */
  648. printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
  649. panic("Out of low memory");
  650. return NULL;
  651. }
/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Unlike __alloc_bootmem_node(), this does NOT fall back to other nodes
 * and returns NULL on failure rather than panicking.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	return alloc_bootmem_core(pgdat->bdata, size, align, goal,
				  ARCH_LOW_ADDRESS_LIMIT);
}