mem.c

/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include "as-layout.h"
#include "init.h"
#include "kern.h"
#include "kern_util.h"
#include "mem_user.h"
#include "os.h"

/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;

/* allocated in paging_init and unchanged thereafter */
unsigned long *empty_bad_page = NULL;

/*
 * Initialized during boot, and readonly for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;

int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

static void map_cb(void *unused)
{
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
}

#ifdef CONFIG_HIGHMEM
static void setup_highmem(unsigned long highmem_start,
                          unsigned long highmem_len)
{
        struct page *page;
        unsigned long highmem_pfn;
        int i;

        highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
        for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) {
                page = &mem_map[highmem_pfn + i];
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
        }
}
#endif

void __init mem_init(void)
{
        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /* Map in the area just after the brk now that kmalloc is about
         * to be turned on.
         */
        brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
        map_cb(NULL);
        initial_thread_cb(map_cb, NULL);
        free_bootmem(__pa(brk_end), uml_reserved - brk_end);
        uml_reserved = brk_end;

        /* this will put all low memory onto the freelists */
        totalram_pages = free_all_bootmem();
        max_low_pfn = totalram_pages;
#ifdef CONFIG_HIGHMEM
        totalhigh_pages = highmem >> PAGE_SHIFT;
        totalram_pages += totalhigh_pages;
#endif
        num_physpages = totalram_pages;
        max_pfn = totalram_pages;
        printk(KERN_INFO "Memory: %luk available\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
        kmalloc_ok = 1;

#ifdef CONFIG_HIGHMEM
        setup_highmem(end_iomem, highmem);
#endif
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                set_pmd(pmd, __pmd(_KERNPG_TABLE +
                                   (unsigned long) __pa(pte)));
                if (pte != pte_offset_kernel(pmd, 0))
                        BUG();
        }
}

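/*
 * With three-level page tables, allocate a pmd page and hook it into the
 * given pud entry; on two-level configurations this is a no-op.
 */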
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
        pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

        set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
        if (pmd_table != pmd_offset(pud, 0))
                BUG();
#endif
}

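/*
 * Walk [start, end) and make sure the intermediate page table pages exist;
 * no ptes are set here - callers fill in the actual mappings later.
 */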
static void __init fixrange_init(unsigned long start, unsigned long end,
                                 pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pmd_index(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = pud_offset(pgd, vaddr);
                if (pud_none(*pud))
                        one_md_table_init(pud);
                pmd = pmd_offset(pud, vaddr);
                for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
                        one_page_table_init(pmd);
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), \
                                     (vaddr)), (vaddr))

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void __init init_highmem(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;

        kmap_init();
}
#endif /* CONFIG_HIGHMEM */

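/*
 * Copy the existing contents of the FIXADDR_USER area (the host's vsyscall
 * page, on configurations that reuse it) into boot memory and remap that
 * copy read-only at the same fixed user addresses.
 */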
static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
        long size = FIXADDR_USER_END - FIXADDR_USER_START;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        phys_t p;
        unsigned long v, vaddr = FIXADDR_USER_START;

        if (!size)
                return;

        fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
        v = (unsigned long) alloc_bootmem_low_pages(size);
        memcpy((void *) v, (void *) FIXADDR_USER_START, size);
        p = __pa(v);
        for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
                          p += PAGE_SIZE) {
                pgd = swapper_pg_dir + pgd_index(vaddr);
                pud = pud_offset(pgd, vaddr);
                pmd = pmd_offset(pud, vaddr);
                pte = pte_offset_kernel(pmd, vaddr);
                pte_set_val(*pte, p, PAGE_READONLY);
        }
#endif
}

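/*
 * Allocate the zero and bad pages, set up the zone sizes, and build the
 * page table structure for the fixmap (and, if configured, highmem) areas.
 */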
void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES], vaddr;
        int i;

        empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        for (i = 0; i < ARRAY_SIZE(zones_size); i++)
                zones_size[i] = 0;

        zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
                (uml_physmem >> PAGE_SHIFT);
#ifdef CONFIG_HIGHMEM
        zones_size[ZONE_HIGHMEM] = highmem >> PAGE_SHIFT;
#endif
        free_area_init(zones_size);

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

        fixaddr_user_init();

#ifdef CONFIG_HIGHMEM
        init_highmem();
#endif
}

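/*
 * Probe each page of the allocation by writing a zero to it under the
 * thread's fault catcher, so a page the host cannot actually back is
 * caught here rather than later.  If a write faults and the caller may
 * not sleep, fail; otherwise allocate a replacement set of pages and
 * probe again.
 */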
struct page *arch_validate(struct page *page, gfp_t mask, int order)
{
        unsigned long addr, zero = 0;
        int i;

again:
        if (page == NULL)
                return page;
        if (PageHighMem(page))
                return page;

        addr = (unsigned long) page_address(page);
        for (i = 0; i < (1 << order); i++) {
                current->thread.fault_addr = (void *) addr;
                if (__do_copy_to_user((void __user *) addr, &zero,
                                      sizeof(zero),
                                      &current->thread.fault_addr,
                                      &current->thread.fault_catcher)) {
                        if (!(mask & __GFP_WAIT))
                                return NULL;
                        else break;
                }
                addr += PAGE_SIZE;
        }

        if (i == (1 << order))
                return page;
        page = alloc_pages(mask, order);
        goto again;
}

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */
void free_initmem(void)
{
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
                       (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }
}
#endif

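/*
 * Print the free-area summary, then walk mem_map and count total, highmem,
 * reserved, swap-cached and shared pages.
 */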
void show_mem(void)
{
        int pfn, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n",
               nr_swap_pages<<(PAGE_SHIFT-10));
        pfn = max_mapnr;
        while (pfn-- > 0) {
                page = pfn_to_page(pfn);
                total++;
                if (PageHighMem(page))
                        highmem++;
                if (PageReserved(page))
                        reserved++;
                else if (PageSwapCache(page))
                        cached++;
                else if (page_count(page))
                        shared += page_count(page) - 1;
        }
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);
}

/* Allocate and free page tables. */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

        if (pgd) {
                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                memcpy(pgd + USER_PTRS_PER_PGD,
                       swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        free_page((unsigned long) pgd);
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte;

        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
        return pte;
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
        return pte;
}

#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

        if (pmd)
                memset(pmd, 0, PAGE_SIZE);
        return pmd;
}
#endif