/*
 *  linux/mm/bootmem.c
 *
 *  Copyright (C) 1999 Ingo Molnar
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *
 *  simple boot-time physical memory area allocator and
 *  free memory collector. It's used to deal with reserved
 *  system memory and memory holes as well.
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

/*
 * Access to this subsystem has to be serialized externally (this is
 * true for the boot process anyway).
 */
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

static LIST_HEAD(bdata_list);

#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

/* return the number of _pages_ that will be allocated for the boot bitmap */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
        unsigned long mapsize;

        mapsize = (pages + 7) / 8;
        mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
        mapsize >>= PAGE_SHIFT;

        return mapsize;
}
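
/*
 * A minimal worked example of the arithmetic above, assuming 4 KiB
 * pages (PAGE_SHIFT == 12): covering 32768 page frames (128 MiB) needs
 * 32768/8 == 4096 bitmap bytes, which rounds up to exactly one page:
 *
 *      unsigned long bitmap_pages = bootmem_bootmap_pages(32768);
 *      (bitmap_pages == 1 on such a configuration)
 */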

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
        bootmem_data_t *ent;

        if (list_empty(&bdata_list)) {
                list_add(&bdata->list, &bdata_list);
                return;
        }
        /* insert in order */
        list_for_each_entry(ent, &bdata_list, list) {
                if (bdata->node_boot_start < ent->node_boot_start) {
                        list_add_tail(&bdata->list, &ent->list);
                        return;
                }
        }
        list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Given an initialised bdata, return the size of the boot bitmap in bytes.
 */
static unsigned long __init get_mapsize(bootmem_data_t *bdata)
{
        unsigned long mapsize;
        unsigned long start = PFN_DOWN(bdata->node_boot_start);
        unsigned long end = bdata->node_low_pfn;

        mapsize = ((end - start) + 7) / 8;
        return ALIGN(mapsize, sizeof(long));
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
        unsigned long mapstart, unsigned long start, unsigned long end)
{
        unsigned long mapsize;

        mminit_validate_memmodel_limits(&start, &end);
        bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
        bdata->node_boot_start = PFN_PHYS(start);
        bdata->node_low_pfn = end;
        link_bootmem(bdata);

        /*
         * Initially all pages are reserved - setup_arch() has to
         * register free RAM areas explicitly.
         */
        mapsize = get_mapsize(bdata);
        memset(bdata->node_bootmem_map, 0xff, mapsize);

        return mapsize;
}
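
/*
 * A minimal sketch of the typical boot flow on a single-node setup
 * (the addresses and names here are hypothetical, the exact sequence
 * is architecture-specific):
 *
 *      bootmap_size = init_bootmem(bitmap_pfn, max_low_pfn);
 *      free_bootmem(ram_start, ram_size);       (once per usable range)
 *      reserve_bootmem(PFN_PHYS(bitmap_pfn), bootmap_size,
 *                      BOOTMEM_DEFAULT);
 *
 * Everything starts out reserved; setup_arch() frees the usable RAM
 * ranges and then re-reserves the pages backing the bitmap itself.
 */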

/*
 * Check whether a particular physical memory range can be marked
 * unallocatable. Usable RAM in a reserved range might be used for
 * boot-time allocations - or it might get added to the free page pool
 * later on.
 */
static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
                        unsigned long addr, unsigned long size, int flags)
{
        unsigned long sidx, eidx;
        unsigned long i;

        BUG_ON(!size);

        /* out of range for this node; don't block the other nodes */
        if (addr + size < bdata->node_boot_start ||
            PFN_DOWN(addr) > bdata->node_low_pfn)
                return 0;

        /*
         * Convert the range to bitmap indices; partial pages are
         * reserved in full (start rounded down, end rounded up).
         */
        if (addr > bdata->node_boot_start)
                sidx = PFN_DOWN(addr - bdata->node_boot_start);
        else
                sidx = 0;

        eidx = PFN_UP(addr + size - bdata->node_boot_start);
        if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
                eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

        for (i = sidx; i < eidx; i++) {
                if (test_bit(i, bdata->node_bootmem_map)) {
                        if (flags & BOOTMEM_EXCLUSIVE)
                                return -EBUSY;
                }
        }

        return 0;
}

static void __init reserve_bootmem_core(bootmem_data_t *bdata,
                        unsigned long addr, unsigned long size, int flags)
{
        unsigned long sidx, eidx;
        unsigned long i;

        BUG_ON(!size);

        /* out of range */
        if (addr + size < bdata->node_boot_start ||
            PFN_DOWN(addr) > bdata->node_low_pfn)
                return;

        /*
         * Convert the range to bitmap indices; partial pages are
         * reserved in full (start rounded down, end rounded up).
         */
        if (addr > bdata->node_boot_start)
                sidx = PFN_DOWN(addr - bdata->node_boot_start);
        else
                sidx = 0;

        eidx = PFN_UP(addr + size - bdata->node_boot_start);
        if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
                eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

        for (i = sidx; i < eidx; i++) {
                if (test_and_set_bit(i, bdata->node_bootmem_map)) {
#ifdef CONFIG_DEBUG_BOOTMEM
                        printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
#endif
                }
        }
}
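
/*
 * An illustrative sketch with a hypothetical address: reserving a
 * 4 KiB firmware table at 0xf0000 and failing, rather than silently
 * sharing it, if someone reserved any of it first:
 *
 *      if (reserve_bootmem(0xf0000, PAGE_SIZE, BOOTMEM_EXCLUSIVE))
 *              printk(KERN_ERR "table already reserved\n");
 *
 * With BOOTMEM_DEFAULT the same call would succeed regardless and
 * simply leave already-set bits alone.
 */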

static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
                                     unsigned long size)
{
        unsigned long sidx, eidx;
        unsigned long i;

        BUG_ON(!size);

        /* out of range */
        if (addr + size < bdata->node_boot_start ||
            PFN_DOWN(addr) > bdata->node_low_pfn)
                return;

        /*
         * Pull the cached first-fit hint back if the freed range
         * starts below it.
         */
        if (addr >= bdata->node_boot_start && addr < bdata->last_success)
                bdata->last_success = addr;

        /*
         * Round the range inward to whole pages: partially freed
         * pages are considered reserved.
         */
        if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
                sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
        else
                sidx = 0;

        eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
        if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
                eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

        for (i = sidx; i < eidx; i++) {
                if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
                        BUG();
        }
}
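
/*
 * Note the asymmetry with reserve: freeing rounds inward, reserving
 * rounds outward. A hypothetical example, assuming 4 KiB pages:
 * freeing [0x1800, 0x3800) only clears the bit for the one fully
 * covered page at 0x2000; the partial pages at 0x1000 and 0x3000
 * stay reserved.
 */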

/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE: This function is _not_ reentrant.
 */
static void * __init
alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
                unsigned long align, unsigned long goal, unsigned long limit)
{
        unsigned long areasize, preferred;
        unsigned long i, start = 0, incr, eidx, end_pfn;
        void *ret;
        unsigned long node_boot_start;
        void *node_bootmem_map;

        if (!size) {
                printk("alloc_bootmem_core(): zero-sized request\n");
                BUG();
        }
        BUG_ON(align & (align-1));

        /* on nodes without memory - bootmem_map is NULL */
        if (!bdata->node_bootmem_map)
                return NULL;

        /* bdata->node_boot_start is expected to be (12+6) bits aligned on x86_64 */
        node_boot_start = bdata->node_boot_start;
        node_bootmem_map = bdata->node_bootmem_map;
        if (align) {
                node_boot_start = ALIGN(bdata->node_boot_start, align);
                if (node_boot_start > bdata->node_boot_start)
                        node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
                                PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
        }

        if (limit && node_boot_start >= limit)
                return NULL;

        end_pfn = bdata->node_low_pfn;
        limit = PFN_DOWN(limit);
        if (limit && end_pfn > limit)
                end_pfn = limit;

        eidx = end_pfn - PFN_DOWN(node_boot_start);

        /*
         * We try to allocate bootmem pages above 'goal'
         * first, then we try to allocate lower pages.
         */
        preferred = 0;
        if (goal && PFN_DOWN(goal) < end_pfn) {
                if (goal > node_boot_start)
                        preferred = goal - node_boot_start;

                if (bdata->last_success > node_boot_start &&
                        bdata->last_success - node_boot_start >= preferred)
                        if (!limit || limit > bdata->last_success)
                                preferred = bdata->last_success - node_boot_start;
        }

        preferred = PFN_DOWN(ALIGN(preferred, align));
        areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
        incr = align >> PAGE_SHIFT ? : 1;

restart_scan:
        for (i = preferred; i < eidx;) {
                unsigned long j;

                i = find_next_zero_bit(node_bootmem_map, eidx, i);
                i = ALIGN(i, incr);
                if (i >= eidx)
                        break;
                if (test_bit(i, node_bootmem_map)) {
                        i += incr;
                        continue;
                }
                for (j = i + 1; j < i + areasize; ++j) {
                        if (j >= eidx)
                                goto fail_block;
                        if (test_bit(j, node_bootmem_map))
                                goto fail_block;
                }
                start = i;
                goto found;
        fail_block:
                i = ALIGN(j, incr);
                if (i == j)
                        i += incr;
        }

        if (preferred > 0) {
                preferred = 0;
                goto restart_scan;
        }
        return NULL;

found:
        bdata->last_success = PFN_PHYS(start) + node_boot_start;
        BUG_ON(start >= eidx);

        /*
         * Is the next page of the previous allocation-end the start
         * of this allocation's buffer? If yes then we can 'merge'
         * the previous partial page with this allocation.
         */
        if (align < PAGE_SIZE &&
            bdata->last_offset && bdata->last_pos+1 == start) {
                unsigned long offset, remaining_size;
                offset = ALIGN(bdata->last_offset, align);
                BUG_ON(offset > PAGE_SIZE);
                remaining_size = PAGE_SIZE - offset;
                if (size < remaining_size) {
                        areasize = 0;
                        /* last_pos unchanged */
                        bdata->last_offset = offset + size;
                        ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
                                           offset + node_boot_start);
                } else {
                        remaining_size = size - remaining_size;
                        areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
                        ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
                                           offset + node_boot_start);
                        bdata->last_pos = start + areasize - 1;
                        bdata->last_offset = remaining_size;
                }
                bdata->last_offset &= ~PAGE_MASK;
        } else {
                bdata->last_pos = start + areasize - 1;
                bdata->last_offset = size & ~PAGE_MASK;
                ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
        }

        /*
         * Reserve the area now:
         */
        for (i = start; i < start + areasize; i++)
                if (unlikely(test_and_set_bit(i, node_bootmem_map)))
                        BUG();
        memset(ret, 0, size);

        return ret;
}
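
/*
 * A sketch of the 'merge' behaviour, with hypothetical sizes and
 * assuming 4 KiB pages: a first call for 3000 bytes reserves one page
 * and records last_pos/last_offset; a second sub-page call is then
 * carved out of the tail of that same page instead of burning a fresh
 * one, in the common case where the scan lands on the very next page:
 *
 *      void *a = alloc_bootmem_core(bdata, 3000, 64, 0, 0);
 *      void *b = alloc_bootmem_core(bdata, 512, 64, 0, 0);
 *
 * Here offset = ALIGN(3000, 64) = 3008 and 512 < 4096 - 3008, so b
 * ends up inside a's page at offset 3008 and areasize is 0 - no new
 * bitmap bits are set for it.
 */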

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
        struct page *page;
        unsigned long pfn;
        unsigned long i, count;
        unsigned long idx;
        unsigned long *map;
        int gofast = 0;

        BUG_ON(!bdata->node_bootmem_map);

        count = 0;
        /* first extant page of the node */
        pfn = PFN_DOWN(bdata->node_boot_start);
        idx = bdata->node_low_pfn - pfn;
        map = bdata->node_bootmem_map;
        /*
         * Check if we are aligned to BITS_PER_LONG pages. If so, we might
         * be able to free page orders of that size at once.
         */
        if (!(pfn & (BITS_PER_LONG-1)))
                gofast = 1;

        for (i = 0; i < idx; ) {
                unsigned long v = ~map[i / BITS_PER_LONG];

                if (gofast && v == ~0UL) {
                        int order;

                        page = pfn_to_page(pfn);
                        count += BITS_PER_LONG;
                        order = ffs(BITS_PER_LONG) - 1;
                        __free_pages_bootmem(page, order);
                        i += BITS_PER_LONG;
                        page += BITS_PER_LONG;
                } else if (v) {
                        unsigned long m;

                        page = pfn_to_page(pfn);
                        for (m = 1; m && i < idx; m<<=1, page++, i++) {
                                if (v & m) {
                                        count++;
                                        __free_pages_bootmem(page, 0);
                                }
                        }
                } else {
                        i += BITS_PER_LONG;
                }
                pfn += BITS_PER_LONG;
        }

        /*
         * Now free the allocator bitmap itself, it's not
         * needed anymore:
         */
        page = virt_to_page(bdata->node_bootmem_map);
        idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
        for (i = 0; i < idx; i++, page++)
                __free_pages_bootmem(page, 0);
        count += i;
        bdata->node_bootmem_map = NULL;

        return count;
}
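
/*
 * The fast path above relies on BITS_PER_LONG being a power of two:
 * when a whole bitmap word is free, ffs(BITS_PER_LONG) - 1 yields the
 * matching buddy order. For example, on a 64-bit build ffs(64) == 7,
 * so a fully free word is handed back as one order-6 (64-page) block
 * instead of 64 separate order-0 frees.
 */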

unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
                                unsigned long startpfn, unsigned long endpfn)
{
        return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                 unsigned long size, int flags)
{
        int ret;

        ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
        if (ret < 0)
                return -ENOMEM;
        reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
        return 0;
}

void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
        free_bootmem_core(pgdat->bdata, physaddr, size);
}

unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
        register_page_bootmem_info_node(pgdat);
        return free_all_bootmem_core(pgdat->bdata);
}

unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
        max_low_pfn = pages;
        min_low_pfn = start;
        return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}

#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
int __init reserve_bootmem(unsigned long addr, unsigned long size,
                            int flags)
{
        bootmem_data_t *bdata;
        int ret;

        list_for_each_entry(bdata, &bdata_list, list) {
                ret = can_reserve_bootmem_core(bdata, addr, size, flags);
                if (ret < 0)
                        return ret;
        }
        list_for_each_entry(bdata, &bdata_list, list)
                reserve_bootmem_core(bdata, addr, size, flags);

        return 0;
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
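
/*
 * Note the two-pass structure above: every node is checked first, so
 * a BOOTMEM_EXCLUSIVE conflict fails the whole request before any
 * bits are set anywhere. A sketch of a hypothetical caller (the
 * crash_base/crash_size names are illustrative, not from this file):
 *
 *      if (reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE))
 *              printk(KERN_ERR "crashkernel region already in use\n");
 */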

void __init free_bootmem(unsigned long addr, unsigned long size)
{
        bootmem_data_t *bdata;

        list_for_each_entry(bdata, &bdata_list, list)
                free_bootmem_core(bdata, addr, size);
}

unsigned long __init free_all_bootmem(void)
{
        return free_all_bootmem_core(NODE_DATA(0)->bdata);
}

void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                      unsigned long goal)
{
        bootmem_data_t *bdata;
        void *ptr;

        list_for_each_entry(bdata, &bdata_list, list) {
                ptr = alloc_bootmem_core(bdata, size, align, goal, 0);
                if (ptr)
                        return ptr;
        }
        return NULL;
}

void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        void *mem = __alloc_bootmem_nopanic(size, align, goal);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}
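
/*
 * A minimal usage sketch: early code that needs memory before the
 * buddy allocator is up. The alloc_bootmem() wrappers in
 * <linux/bootmem.h> boil down to calls of this shape (the cmd_line
 * destination is a hypothetical example):
 *
 *      cmd_line = __alloc_bootmem(COMMAND_LINE_SIZE, SMP_CACHE_BYTES,
 *                                 __pa(MAX_DMA_ADDRESS));
 *
 * The __pa(MAX_DMA_ADDRESS) goal steers the allocation above the DMA
 * zone when possible; on failure this variant panics, unlike the
 * _nopanic one above.
 */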

void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        void *ptr;

        ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
        if (ptr)
                return ptr;

        return __alloc_bootmem(size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
void * __init alloc_bootmem_section(unsigned long size,
                                    unsigned long section_nr)
{
        void *ptr;
        unsigned long limit, goal, start_nr, end_nr, pfn;
        struct pglist_data *pgdat;

        pfn = section_nr_to_pfn(section_nr);
        goal = PFN_PHYS(pfn);
        limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
        pgdat = NODE_DATA(early_pfn_to_nid(pfn));
        ptr = alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
                                 limit);

        if (!ptr)
                return NULL;

        start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
        end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));
        if (start_nr != section_nr || end_nr != section_nr) {
                printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
                       section_nr);
                free_bootmem_core(pgdat->bdata, __pa(ptr), size);
                ptr = NULL;
        }

        return ptr;
}
#endif
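
/*
 * The start_nr/end_nr check above is a defensive guard against an
 * allocation straddling a section boundary. As a rough illustration,
 * assuming SECTION_SIZE_BITS == 27 (128 MiB sections, a common x86_64
 * value): an allocation beginning just below a 128 MiB boundary would
 * report end_nr == section_nr + 1 and be handed back via
 * free_bootmem_core() rather than returned.
 */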

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        bootmem_data_t *bdata;
        void *ptr;

        list_for_each_entry(bdata, &bdata_list, list) {
                ptr = alloc_bootmem_core(bdata, size, align, goal,
                                         ARCH_LOW_ADDRESS_LIMIT);
                if (ptr)
                        return ptr;
        }

        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of low memory");
        return NULL;
}
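
/*
 * The _low variants cap the search at ARCH_LOW_ADDRESS_LIMIT (the
 * first 4 GiB unless an architecture overrides it), which is what
 * callers with 32-bit DMA constraints need. A hypothetical sketch
 * (swiotlb_buf and IO_TLB_BYTES are illustrative names only):
 *
 *      swiotlb_buf = __alloc_bootmem_low(IO_TLB_BYTES, PAGE_SIZE, 0);
 */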

void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        return alloc_bootmem_core(pgdat->bdata, size, align, goal,
                                  ARCH_LOW_ADDRESS_LIMIT);
}