init_64.c

/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"
#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */
phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;
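
/*
 * Release the memory occupied by the kernel's __init sections once boot
 * is complete, poisoning each page first so stale references are caught.
 */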
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
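/* Hand the pages that held the initrd image back to the page allocator. */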
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

#ifdef CONFIG_PROC_KCORE
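/*
 * Register every LMB memory region, plus the vmalloc space, with
 * /proc/kcore so kernel memory can be examined as an ELF core image.
 */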
static struct kcore_list kcore_vmem;

static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base, size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __func__);

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END - VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif
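
/*
 * Constructors for the page-table slab caches: a freshly allocated
 * table must start out with every entry clear.
 */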
static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}

static const unsigned int pgtable_cache_size[2] = {
	PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
	"pgd_cache", "pmd_cache",
#else
	"pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};

#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need an extra cache per hugepage size, initialized in
 * hugetlbpage.c.  We can't put it into the tables above, because
 * HPAGE_SHIFT is not a compile time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+MMU_PAGE_COUNT];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif
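
/* Create the slab caches from which PGD and PMD pages are allocated. */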
void pgtable_cache_init(void)
{
	pgtable_cache[0] = kmem_cache_create(pgtable_cache_name[0],
					     PGD_TABLE_SIZE, PGD_TABLE_SIZE,
					     SLAB_PANIC, pgd_ctor);
	pgtable_cache[1] = kmem_cache_create(pgtable_cache_name[1],
					     PMD_TABLE_SIZE, PMD_TABLE_SIZE,
					     SLAB_PANIC, pmd_ctor);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
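
/*
 * As an illustration (the actual values are configuration dependent):
 * if sizeof(struct page) were 64 bytes and a section covered 2^16
 * pages, an address 0x1000 bytes into the vmemmap would give
 * offset / 64 = pfn 64, which the mask rounds down to pfn 0, the
 * first page of section 0.
 */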

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */
#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things.  Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear.
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int mapped = htab_bolt_mapping(start, start + page_size, phys,
				       PAGE_KERNEL, mmu_vmemmap_psize,
				       mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */
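
/*
 * Allocate and map backing memory for the slice of the vmemmap covering
 * [start_page, start_page + nr_pages), skipping any stretch that an
 * overlapping section has already populated.
 */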
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
		 start_page, nr_pages, node);
	pr_debug(" -> map %lx..%lx\n", start, end);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug("  * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */