/* init.c */
  1. /*
  2. * Copyright (C) 2004-2006 Atmel Corporation
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/mm.h>
  10. #include <linux/swap.h>
  11. #include <linux/init.h>
  12. #include <linux/mmzone.h>
  13. #include <linux/bootmem.h>
  14. #include <linux/pagemap.h>
  15. #include <linux/nodemask.h>
  16. #include <asm/page.h>
  17. #include <asm/mmu_context.h>
  18. #include <asm/tlb.h>
  19. #include <asm/io.h>
  20. #include <asm/dma.h>
  21. #include <asm/setup.h>
  22. #include <asm/sections.h>
/* Per-CPU TLB-shootdown state used by the generic mmu_gather machinery. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Kernel master page directory; installed into PTBR in paging_init(). */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* The single all-zero page backing anonymous read faults; set up in paging_init(). */
struct page *empty_zero_page;

/*
 * Cache of MMU context last used.
 */
unsigned long mmu_context_cache = NO_CONTEXT;
  30. void show_mem(void)
  31. {
  32. int total = 0, reserved = 0, cached = 0;
  33. int slab = 0, free = 0, shared = 0;
  34. pg_data_t *pgdat;
  35. printk("Mem-info:\n");
  36. show_free_areas();
  37. for_each_online_pgdat(pgdat) {
  38. struct page *page, *end;
  39. page = pgdat->node_mem_map;
  40. end = page + pgdat->node_spanned_pages;
  41. do {
  42. total++;
  43. if (PageReserved(page))
  44. reserved++;
  45. else if (PageSwapCache(page))
  46. cached++;
  47. else if (PageSlab(page))
  48. slab++;
  49. else if (!page_count(page))
  50. free++;
  51. else
  52. shared += page_count(page) - 1;
  53. page++;
  54. } while (page < end);
  55. }
  56. printk ("%d pages of RAM\n", total);
  57. printk ("%d free pages\n", free);
  58. printk ("%d reserved pages\n", reserved);
  59. printk ("%d slab pages\n", slab);
  60. printk ("%d pages shared\n", shared);
  61. printk ("%d pages swap cached\n", cached);
  62. }
  63. /*
  64. * paging_init() sets up the page tables
  65. *
  66. * This routine also unmaps the page at virtual kernel address 0, so
  67. * that we can trap those pesky NULL-reference errors in the kernel.
  68. */
/*
 * paging_init() sets up the page tables
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	extern unsigned long _evba;	/* exception vector base, from the linker script */
	void *zero_page;
	int nid;

	/*
	 * Make sure we can handle exceptions before enabling
	 * paging. Not that we should ever _get_ any exceptions this
	 * early, but you never know...
	 */
	printk("Exception vectors start at %p\n", &_evba);
	sysreg_write(EVBA, (unsigned long)&_evba);

	/*
	 * Since we are ready to handle exceptions now, we should let
	 * the CPU generate them...
	 */
	__asm__ __volatile__ ("csrf %0" : : "i"(SR_EM_BIT));

	/*
	 * Allocate the zero page. The allocator will panic if it
	 * can't satisfy the request, so no need to check.
	 */
	zero_page = alloc_bootmem_low_pages_node(NODE_DATA(0),
						 PAGE_SIZE);

	{
		pgd_t *pg_dir;
		int i;

		/* Clear the kernel page directory and point PTBR at it
		 * before turning the MMU on. */
		pg_dir = swapper_pg_dir;
		sysreg_write(PTBR, (unsigned long)pg_dir);

		for (i = 0; i < PTRS_PER_PGD; i++)
			pgd_val(pg_dir[i]) = 0;

		enable_mmu();
		printk ("CPU: Paging enabled\n");
	}

	/* Initialize the zone lists for every online node.  Only
	 * ZONE_NORMAL is populated on this architecture. */
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_boot_start;
		start_pfn >>= PAGE_SHIFT;
		low = pgdat->bdata->node_low_pfn;

		memset(zones_size, 0, sizeof(zones_size));
		zones_size[ZONE_NORMAL] = low - start_pfn;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);

		free_area_init_node(nid, pgdat, zones_size, start_pfn, NULL);

		printk("Node %u: mem_map starts at %p\n",
		       pgdat->node_id, pgdat->node_mem_map);
	}

	mem_map = NODE_DATA(0)->node_mem_map;

	/* Publish the zero page now that the MMU is up. */
	memset(zero_page, 0, PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}
  122. void __init mem_init(void)
  123. {
  124. int codesize, reservedpages, datasize, initsize;
  125. int nid, i;
  126. reservedpages = 0;
  127. high_memory = NULL;
  128. /* this will put all low memory onto the freelists */
  129. for_each_online_node(nid) {
  130. pg_data_t *pgdat = NODE_DATA(nid);
  131. unsigned long node_pages = 0;
  132. void *node_high_memory;
  133. num_physpages += pgdat->node_present_pages;
  134. if (pgdat->node_spanned_pages != 0)
  135. node_pages = free_all_bootmem_node(pgdat);
  136. totalram_pages += node_pages;
  137. for (i = 0; i < node_pages; i++)
  138. if (PageReserved(pgdat->node_mem_map + i))
  139. reservedpages++;
  140. node_high_memory = (void *)((pgdat->node_start_pfn
  141. + pgdat->node_spanned_pages)
  142. << PAGE_SHIFT);
  143. if (node_high_memory > high_memory)
  144. high_memory = node_high_memory;
  145. }
  146. max_mapnr = MAP_NR(high_memory);
  147. codesize = (unsigned long)_etext - (unsigned long)_text;
  148. datasize = (unsigned long)_edata - (unsigned long)_data;
  149. initsize = (unsigned long)__init_end - (unsigned long)__init_begin;
  150. printk ("Memory: %luk/%luk available (%dk kernel code, "
  151. "%dk reserved, %dk data, %dk init)\n",
  152. (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
  153. totalram_pages << (PAGE_SHIFT - 10),
  154. codesize >> 10,
  155. reservedpages << (PAGE_SHIFT - 10),
  156. datasize >> 10,
  157. initsize >> 10);
  158. }
  159. static inline void free_area(unsigned long addr, unsigned long end, char *s)
  160. {
  161. unsigned int size = (end - addr) >> 10;
  162. for (; addr < end; addr += PAGE_SIZE) {
  163. struct page *page = virt_to_page(addr);
  164. ClearPageReserved(page);
  165. init_page_count(page);
  166. free_page(addr);
  167. totalram_pages++;
  168. }
  169. if (size && s)
  170. printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n",
  171. s, size, end - (size << 10), end);
  172. }
/* Release the kernel's __init text/data back to the page allocator
 * once boot-time initialization is complete. */
void free_initmem(void)
{
	free_area((unsigned long)__init_begin, (unsigned long)__init_end,
		  "init");
}
#ifdef CONFIG_BLK_DEV_INITRD
/* Release the memory occupied by the initial ramdisk, [start, end),
 * after it has been unpacked. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_area(start, end, "initrd");
}
#endif