/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  10. #include <linux/kernel.h>
  11. #include <linux/errno.h>
  12. #include <linux/swap.h>
  13. #include <linux/init.h>
  14. #include <linux/bootmem.h>
  15. #include <linux/mman.h>
  16. #include <linux/nodemask.h>
  17. #include <linux/initrd.h>
  18. #include <linux/sort.h>
  19. #include <linux/highmem.h>
  20. #include <linux/gfp.h>
  21. #include <asm/mach-types.h>
  22. #include <asm/sections.h>
  23. #include <asm/setup.h>
  24. #include <asm/sizes.h>
  25. #include <asm/tlb.h>
  26. #include <asm/fixmap.h>
  27. #include <asm/mach/arch.h>
  28. #include <asm/mach/map.h>
  29. #include "mm.h"
  30. static unsigned long phys_initrd_start __initdata = 0;
  31. static unsigned long phys_initrd_size __initdata = 0;
  32. static int __init early_initrd(char *p)
  33. {
  34. unsigned long start, size;
  35. char *endp;
  36. start = memparse(p, &endp);
  37. if (*endp == ',') {
  38. size = memparse(endp + 1, NULL);
  39. phys_initrd_start = start;
  40. phys_initrd_size = size;
  41. }
  42. return 0;
  43. }
  44. early_param("initrd", early_initrd);
  45. static int __init parse_tag_initrd(const struct tag *tag)
  46. {
  47. printk(KERN_WARNING "ATAG_INITRD is deprecated; "
  48. "please update your bootloader.\n");
  49. phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
  50. phys_initrd_size = tag->u.initrd.size;
  51. return 0;
  52. }
  53. __tagtable(ATAG_INITRD, parse_tag_initrd);
  54. static int __init parse_tag_initrd2(const struct tag *tag)
  55. {
  56. phys_initrd_start = tag->u.initrd.start;
  57. phys_initrd_size = tag->u.initrd.size;
  58. return 0;
  59. }
  60. __tagtable(ATAG_INITRD2, parse_tag_initrd2);
  61. /*
  62. * This keeps memory configuration data used by a couple memory
  63. * initialization functions, as well as show_mem() for the skipping
  64. * of holes in the memory map. It is populated by arm_add_memory().
  65. */
  66. struct meminfo meminfo;
  67. void show_mem(void)
  68. {
  69. int free = 0, total = 0, reserved = 0;
  70. int shared = 0, cached = 0, slab = 0, node, i;
  71. struct meminfo * mi = &meminfo;
  72. printk("Mem-info:\n");
  73. show_free_areas();
  74. for_each_online_node(node) {
  75. for_each_nodebank (i,mi,node) {
  76. struct membank *bank = &mi->bank[i];
  77. unsigned int pfn1, pfn2;
  78. struct page *page, *end;
  79. pfn1 = bank_pfn_start(bank);
  80. pfn2 = bank_pfn_end(bank);
  81. page = pfn_to_page(pfn1);
  82. end = pfn_to_page(pfn2 - 1) + 1;
  83. do {
  84. total++;
  85. if (PageReserved(page))
  86. reserved++;
  87. else if (PageSwapCache(page))
  88. cached++;
  89. else if (PageSlab(page))
  90. slab++;
  91. else if (!page_count(page))
  92. free++;
  93. else
  94. shared += page_count(page) - 1;
  95. page++;
  96. } while (page < end);
  97. }
  98. }
  99. printk("%d pages of RAM\n", total);
  100. printk("%d free pages\n", free);
  101. printk("%d reserved pages\n", reserved);
  102. printk("%d slab pages\n", slab);
  103. printk("%d pages shared\n", shared);
  104. printk("%d pages swap cached\n", cached);
  105. }
  106. static void __init find_node_limits(int node, struct meminfo *mi,
  107. unsigned long *min, unsigned long *max_low, unsigned long *max_high)
  108. {
  109. int i;
  110. *min = -1UL;
  111. *max_low = *max_high = 0;
  112. for_each_nodebank(i, mi, node) {
  113. struct membank *bank = &mi->bank[i];
  114. unsigned long start, end;
  115. start = bank_pfn_start(bank);
  116. end = bank_pfn_end(bank);
  117. if (*min > start)
  118. *min = start;
  119. if (*max_high < end)
  120. *max_high = end;
  121. if (bank->highmem)
  122. continue;
  123. if (*max_low < end)
  124. *max_low = end;
  125. }
  126. }
  127. /*
  128. * FIXME: We really want to avoid allocating the bootmap bitmap
  129. * over the top of the initrd. Hopefully, this is located towards
  130. * the start of a bank, so if we allocate the bootmap bitmap at
  131. * the end, we won't clash.
  132. */
  133. static unsigned int __init
  134. find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
  135. {
  136. unsigned int start_pfn, i, bootmap_pfn;
  137. start_pfn = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
  138. bootmap_pfn = 0;
  139. for_each_nodebank(i, mi, node) {
  140. struct membank *bank = &mi->bank[i];
  141. unsigned int start, end;
  142. start = bank_pfn_start(bank);
  143. end = bank_pfn_end(bank);
  144. if (end < start_pfn)
  145. continue;
  146. if (start < start_pfn)
  147. start = start_pfn;
  148. if (end <= start)
  149. continue;
  150. if (end - start >= bootmap_pages) {
  151. bootmap_pfn = start;
  152. break;
  153. }
  154. }
  155. if (bootmap_pfn == 0)
  156. BUG();
  157. return bootmap_pfn;
  158. }
  159. static int __init check_initrd(struct meminfo *mi)
  160. {
  161. int initrd_node = -2;
  162. #ifdef CONFIG_BLK_DEV_INITRD
  163. unsigned long end = phys_initrd_start + phys_initrd_size;
  164. /*
  165. * Make sure that the initrd is within a valid area of
  166. * memory.
  167. */
  168. if (phys_initrd_size) {
  169. unsigned int i;
  170. initrd_node = -1;
  171. for (i = 0; i < mi->nr_banks; i++) {
  172. struct membank *bank = &mi->bank[i];
  173. if (bank_phys_start(bank) <= phys_initrd_start &&
  174. end <= bank_phys_end(bank))
  175. initrd_node = bank->node;
  176. }
  177. }
  178. if (initrd_node == -1) {
  179. printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
  180. "physical memory - disabling initrd\n",
  181. phys_initrd_start, phys_initrd_size);
  182. phys_initrd_start = phys_initrd_size = 0;
  183. }
  184. #endif
  185. return initrd_node;
  186. }
  187. static inline void map_memory_bank(struct membank *bank)
  188. {
  189. #ifdef CONFIG_MMU
  190. struct map_desc map;
  191. map.pfn = bank_pfn_start(bank);
  192. map.virtual = __phys_to_virt(bank_phys_start(bank));
  193. map.length = bank_phys_size(bank);
  194. map.type = MT_MEMORY;
  195. create_mapping(&map);
  196. #endif
  197. }
  198. static void __init bootmem_init_node(int node, struct meminfo *mi,
  199. unsigned long start_pfn, unsigned long end_pfn)
  200. {
  201. unsigned long boot_pfn;
  202. unsigned int boot_pages;
  203. pg_data_t *pgdat;
  204. int i;
  205. /*
  206. * Map the memory banks for this node.
  207. */
  208. for_each_nodebank(i, mi, node) {
  209. struct membank *bank = &mi->bank[i];
  210. if (!bank->highmem)
  211. map_memory_bank(bank);
  212. }
  213. /*
  214. * Allocate the bootmem bitmap page.
  215. */
  216. boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
  217. boot_pfn = find_bootmap_pfn(node, mi, boot_pages);
  218. /*
  219. * Initialise the bootmem allocator for this node, handing the
  220. * memory banks over to bootmem.
  221. */
  222. node_set_online(node);
  223. pgdat = NODE_DATA(node);
  224. init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
  225. for_each_nodebank(i, mi, node) {
  226. struct membank *bank = &mi->bank[i];
  227. if (!bank->highmem)
  228. free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
  229. }
  230. /*
  231. * Reserve the bootmem bitmap for this node.
  232. */
  233. reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
  234. boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
  235. }
  236. static void __init bootmem_reserve_initrd(int node)
  237. {
  238. #ifdef CONFIG_BLK_DEV_INITRD
  239. pg_data_t *pgdat = NODE_DATA(node);
  240. int res;
  241. res = reserve_bootmem_node(pgdat, phys_initrd_start,
  242. phys_initrd_size, BOOTMEM_EXCLUSIVE);
  243. if (res == 0) {
  244. initrd_start = __phys_to_virt(phys_initrd_start);
  245. initrd_end = initrd_start + phys_initrd_size;
  246. } else {
  247. printk(KERN_ERR
  248. "INITRD: 0x%08lx+0x%08lx overlaps in-use "
  249. "memory region - disabling initrd\n",
  250. phys_initrd_start, phys_initrd_size);
  251. }
  252. #endif
  253. }
  254. static void __init bootmem_free_node(int node, struct meminfo *mi)
  255. {
  256. unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
  257. unsigned long min, max_low, max_high;
  258. int i;
  259. find_node_limits(node, mi, &min, &max_low, &max_high);
  260. /*
  261. * initialise the zones within this node.
  262. */
  263. memset(zone_size, 0, sizeof(zone_size));
  264. /*
  265. * The size of this node has already been determined. If we need
  266. * to do anything fancy with the allocation of this memory to the
  267. * zones, now is the time to do it.
  268. */
  269. zone_size[0] = max_low - min;
  270. #ifdef CONFIG_HIGHMEM
  271. zone_size[ZONE_HIGHMEM] = max_high - max_low;
  272. #endif
  273. /*
  274. * For each bank in this node, calculate the size of the holes.
  275. * holes = node_size - sum(bank_sizes_in_node)
  276. */
  277. memcpy(zhole_size, zone_size, sizeof(zhole_size));
  278. for_each_nodebank(i, mi, node) {
  279. int idx = 0;
  280. #ifdef CONFIG_HIGHMEM
  281. if (mi->bank[i].highmem)
  282. idx = ZONE_HIGHMEM;
  283. #endif
  284. zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
  285. }
  286. /*
  287. * Adjust the sizes according to any special requirements for
  288. * this machine type.
  289. */
  290. arch_adjust_zones(node, zone_size, zhole_size);
  291. free_area_init_node(node, zone_size, min, zhole_size);
  292. }
  293. #ifndef CONFIG_SPARSEMEM
  294. int pfn_valid(unsigned long pfn)
  295. {
  296. struct meminfo *mi = &meminfo;
  297. unsigned int left = 0, right = mi->nr_banks;
  298. do {
  299. unsigned int mid = (right + left) / 2;
  300. struct membank *bank = &mi->bank[mid];
  301. if (pfn < bank_pfn_start(bank))
  302. right = mid;
  303. else if (pfn >= bank_pfn_end(bank))
  304. left = mid + 1;
  305. else
  306. return 1;
  307. } while (left < right);
  308. return 0;
  309. }
  310. EXPORT_SYMBOL(pfn_valid);
  311. static void arm_memory_present(struct meminfo *mi, int node)
  312. {
  313. }
  314. #else
  315. static void arm_memory_present(struct meminfo *mi, int node)
  316. {
  317. int i;
  318. for_each_nodebank(i, mi, node) {
  319. struct membank *bank = &mi->bank[i];
  320. memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
  321. }
  322. }
  323. #endif
  324. static int __init meminfo_cmp(const void *_a, const void *_b)
  325. {
  326. const struct membank *a = _a, *b = _b;
  327. long cmp = bank_pfn_start(a) - bank_pfn_start(b);
  328. return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
  329. }
  330. void __init bootmem_init(void)
  331. {
  332. struct meminfo *mi = &meminfo;
  333. unsigned long min, max_low, max_high;
  334. int node, initrd_node;
  335. sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL);
  336. /*
  337. * Locate which node contains the ramdisk image, if any.
  338. */
  339. initrd_node = check_initrd(mi);
  340. max_low = max_high = 0;
  341. /*
  342. * Run through each node initialising the bootmem allocator.
  343. */
  344. for_each_node(node) {
  345. unsigned long node_low, node_high;
  346. find_node_limits(node, mi, &min, &node_low, &node_high);
  347. if (node_low > max_low)
  348. max_low = node_low;
  349. if (node_high > max_high)
  350. max_high = node_high;
  351. /*
  352. * If there is no memory in this node, ignore it.
  353. * (We can't have nodes which have no lowmem)
  354. */
  355. if (node_low == 0)
  356. continue;
  357. bootmem_init_node(node, mi, min, node_low);
  358. /*
  359. * Reserve any special node zero regions.
  360. */
  361. if (node == 0)
  362. reserve_node_zero(NODE_DATA(node));
  363. /*
  364. * If the initrd is in this node, reserve its memory.
  365. */
  366. if (node == initrd_node)
  367. bootmem_reserve_initrd(node);
  368. /*
  369. * Sparsemem tries to allocate bootmem in memory_present(),
  370. * so must be done after the fixed reservations
  371. */
  372. arm_memory_present(mi, node);
  373. }
  374. /*
  375. * sparse_init() needs the bootmem allocator up and running.
  376. */
  377. sparse_init();
  378. /*
  379. * Now free memory in each node - free_area_init_node needs
  380. * the sparse mem_map arrays initialized by sparse_init()
  381. * for memmap_init_zone(), otherwise all PFNs are invalid.
  382. */
  383. for_each_node(node)
  384. bootmem_free_node(node, mi);
  385. high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
  386. /*
  387. * This doesn't seem to be used by the Linux memory manager any
  388. * more, but is used by ll_rw_block. If we can get rid of it, we
  389. * also get rid of some of the stuff above as well.
  390. *
  391. * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
  392. * the system, not the maximum PFN.
  393. */
  394. max_low_pfn = max_low - PHYS_PFN_OFFSET;
  395. max_pfn = max_high - PHYS_PFN_OFFSET;
  396. }
  397. static inline int free_area(unsigned long pfn, unsigned long end, char *s)
  398. {
  399. unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
  400. for (; pfn < end; pfn++) {
  401. struct page *page = pfn_to_page(pfn);
  402. ClearPageReserved(page);
  403. init_page_count(page);
  404. __free_page(page);
  405. pages++;
  406. }
  407. if (size && s)
  408. printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
  409. return pages;
  410. }
  411. static inline void
  412. free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
  413. {
  414. struct page *start_pg, *end_pg;
  415. unsigned long pg, pgend;
  416. /*
  417. * Convert start_pfn/end_pfn to a struct page pointer.
  418. */
  419. start_pg = pfn_to_page(start_pfn - 1) + 1;
  420. end_pg = pfn_to_page(end_pfn);
  421. /*
  422. * Convert to physical addresses, and
  423. * round start upwards and end downwards.
  424. */
  425. pg = PAGE_ALIGN(__pa(start_pg));
  426. pgend = __pa(end_pg) & PAGE_MASK;
  427. /*
  428. * If there are free pages between these,
  429. * free the section of the memmap array.
  430. */
  431. if (pg < pgend)
  432. free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
  433. }
  434. /*
  435. * The mem_map array can get very big. Free the unused area of the memory map.
  436. */
  437. static void __init free_unused_memmap_node(int node, struct meminfo *mi)
  438. {
  439. unsigned long bank_start, prev_bank_end = 0;
  440. unsigned int i;
  441. /*
  442. * [FIXME] This relies on each bank being in address order. This
  443. * may not be the case, especially if the user has provided the
  444. * information on the command line.
  445. */
  446. for_each_nodebank(i, mi, node) {
  447. struct membank *bank = &mi->bank[i];
  448. bank_start = bank_pfn_start(bank);
  449. if (bank_start < prev_bank_end) {
  450. printk(KERN_ERR "MEM: unordered memory banks. "
  451. "Not freeing memmap.\n");
  452. break;
  453. }
  454. /*
  455. * If we had a previous bank, and there is a space
  456. * between the current bank and the previous, free it.
  457. */
  458. if (prev_bank_end && prev_bank_end != bank_start)
  459. free_memmap(node, prev_bank_end, bank_start);
  460. prev_bank_end = bank_pfn_end(bank);
  461. }
  462. }
  463. /*
  464. * mem_init() marks the free areas in the mem_map and tells us how much
  465. * memory is free. This is done after various parts of the system have
  466. * claimed their memory after the kernel image.
  467. */
  468. void __init mem_init(void)
  469. {
  470. unsigned long reserved_pages, free_pages;
  471. int i, node;
  472. #ifndef CONFIG_DISCONTIGMEM
  473. max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
  474. #endif
  475. /* this will put all unused low memory onto the freelists */
  476. for_each_online_node(node) {
  477. pg_data_t *pgdat = NODE_DATA(node);
  478. free_unused_memmap_node(node, &meminfo);
  479. if (pgdat->node_spanned_pages != 0)
  480. totalram_pages += free_all_bootmem_node(pgdat);
  481. }
  482. #ifdef CONFIG_SA1111
  483. /* now that our DMA memory is actually so designated, we can free it */
  484. totalram_pages += free_area(PHYS_PFN_OFFSET,
  485. __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
  486. #endif
  487. #ifdef CONFIG_HIGHMEM
  488. /* set highmem page free */
  489. for_each_online_node(node) {
  490. for_each_nodebank (i, &meminfo, node) {
  491. unsigned long start = bank_pfn_start(&meminfo.bank[i]);
  492. unsigned long end = bank_pfn_end(&meminfo.bank[i]);
  493. if (start >= max_low_pfn + PHYS_PFN_OFFSET)
  494. totalhigh_pages += free_area(start, end, NULL);
  495. }
  496. }
  497. totalram_pages += totalhigh_pages;
  498. #endif
  499. reserved_pages = free_pages = 0;
  500. for_each_online_node(node) {
  501. for_each_nodebank(i, &meminfo, node) {
  502. struct membank *bank = &meminfo.bank[i];
  503. unsigned int pfn1, pfn2;
  504. struct page *page, *end;
  505. pfn1 = bank_pfn_start(bank);
  506. pfn2 = bank_pfn_end(bank);
  507. page = pfn_to_page(pfn1);
  508. end = pfn_to_page(pfn2 - 1) + 1;
  509. do {
  510. if (PageReserved(page))
  511. reserved_pages++;
  512. else if (!page_count(page))
  513. free_pages++;
  514. page++;
  515. } while (page < end);
  516. }
  517. }
  518. /*
  519. * Since our memory may not be contiguous, calculate the
  520. * real number of pages we have in this system
  521. */
  522. printk(KERN_INFO "Memory:");
  523. num_physpages = 0;
  524. for (i = 0; i < meminfo.nr_banks; i++) {
  525. num_physpages += bank_pfn_size(&meminfo.bank[i]);
  526. printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
  527. }
  528. printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
  529. printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
  530. nr_free_pages() << (PAGE_SHIFT-10),
  531. free_pages << (PAGE_SHIFT-10),
  532. reserved_pages << (PAGE_SHIFT-10),
  533. totalhigh_pages << (PAGE_SHIFT-10));
  534. #define MLK(b, t) b, t, ((t) - (b)) >> 10
  535. #define MLM(b, t) b, t, ((t) - (b)) >> 20
  536. #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
  537. printk(KERN_NOTICE "Virtual kernel memory layout:\n"
  538. " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
  539. " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
  540. #ifdef CONFIG_MMU
  541. " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n"
  542. #endif
  543. " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
  544. " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
  545. #ifdef CONFIG_HIGHMEM
  546. " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
  547. #endif
  548. " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
  549. " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
  550. " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
  551. " .data : 0x%p" " - 0x%p" " (%4d kB)\n",
  552. MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
  553. (PAGE_SIZE)),
  554. MLK(FIXADDR_START, FIXADDR_TOP),
  555. #ifdef CONFIG_MMU
  556. MLM(CONSISTENT_BASE, CONSISTENT_END),
  557. #endif
  558. MLM(VMALLOC_START, VMALLOC_END),
  559. MLM(PAGE_OFFSET, (unsigned long)high_memory),
  560. #ifdef CONFIG_HIGHMEM
  561. MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
  562. (PAGE_SIZE)),
  563. #endif
  564. MLM(MODULES_VADDR, MODULES_END),
  565. MLK_ROUNDUP(__init_begin, __init_end),
  566. MLK_ROUNDUP(_text, _etext),
  567. MLK_ROUNDUP(_data, _edata));
  568. #undef MLK
  569. #undef MLM
  570. #undef MLK_ROUNDUP
  571. /*
  572. * Check boundaries twice: Some fundamental inconsistencies can
  573. * be detected at build time already.
  574. */
  575. #ifdef CONFIG_MMU
  576. BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
  577. BUG_ON(VMALLOC_END > CONSISTENT_BASE);
  578. BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
  579. BUG_ON(TASK_SIZE > MODULES_VADDR);
  580. #endif
  581. #ifdef CONFIG_HIGHMEM
  582. BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
  583. BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
  584. #endif
  585. if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
  586. extern int sysctl_overcommit_memory;
  587. /*
  588. * On a machine this small we won't get
  589. * anywhere without overcommit, so turn
  590. * it on by default.
  591. */
  592. sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
  593. }
  594. }
  595. void free_initmem(void)
  596. {
  597. #ifdef CONFIG_HAVE_TCM
  598. extern char *__tcm_start, *__tcm_end;
  599. totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
  600. __phys_to_pfn(__pa(__tcm_end)),
  601. "TCM link");
  602. #endif
  603. if (!machine_is_integrator() && !machine_is_cintegrator())
  604. totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
  605. __phys_to_pfn(__pa(__init_end)),
  606. "init");
  607. }
  608. #ifdef CONFIG_BLK_DEV_INITRD
  609. static int keep_initrd;
  610. void free_initrd_mem(unsigned long start, unsigned long end)
  611. {
  612. if (!keep_initrd)
  613. totalram_pages += free_area(__phys_to_pfn(__pa(start)),
  614. __phys_to_pfn(__pa(end)),
  615. "initrd");
  616. }
  617. static int __init keepinitrd_setup(char *__unused)
  618. {
  619. keep_initrd = 1;
  620. return 1;
  621. }
  622. __setup("keepinitrd", keepinitrd_setup);
  623. #endif