init_64.c

/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */
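
/*
 * memstart_addr is the physical address of the start of system memory;
 * kernstart_addr is the physical address the kernel image was loaded at.
 * Both are fixed up during early boot, before the code in this file runs.
 */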
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);
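
/*
 * Constructors for the pagetable kmem caches below.  Slab constructors
 * run when objects are first set up in the cache, so callers must return
 * tables to the cache in their constructed (fully zeroed) state.
 */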
static void pgd_ctor(void *addr)
{
        memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
        memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
        char *name;
        unsigned long table_size = sizeof(void *) << shift;
        unsigned long align = table_size;

        /* When batching pgtable pointers for RCU freeing, we store
         * the index size in the low bits.  Table alignment must be
         * big enough to fit it.
         *
         * Likewise, hugepage pagetable pointers contain a (different)
         * shift value in the low bits.  All tables must be aligned so
         * as to leave enough 0 bits in the address to contain it. */
        unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
                                     HUGEPD_SHIFT_MASK + 1);
        struct kmem_cache *new;

        /* It would be nice if this was a BUILD_BUG_ON(), but at the
         * moment, gcc doesn't seem to recognize is_power_of_2 as a
         * constant expression, so much for that. */
        BUG_ON(!is_power_of_2(minalign));
        BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

        if (PGT_CACHE(shift))
                return; /* Already have a cache of this size */

        align = max_t(unsigned long, align, minalign);
        name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
        new = kmem_cache_create(name, table_size, align, 0, ctor);
        PGT_CACHE(shift) = new;

        pr_debug("Allocated pgtable cache for order %d\n", shift);
}
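
/*
 * Usage sketch (hypothetical caller, for illustration only): a table of
 * 2^9 pointers would be set up and allocated from as
 *
 *      pgtable_cache_add(9, NULL);
 *      table = kmem_cache_alloc(PGT_CACHE(9), GFP_KERNEL);
 *
 * The cache's alignment equals the table size (or minalign, if that is
 * larger), so the low bits of a table's address are guaranteed to be
 * zero and free to carry the index/shift tags described above.
 */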

void pgtable_cache_init(void)
{
        pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
        pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
        if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
                panic("Couldn't allocate pgtable caches");

        /* In all current configs, when the PUD index exists it's the
         * same size as either the pgd or pmd index.  Verify that the
         * initialization above has also created a PUD cache.  This
         * will need re-examination if we add new possibilities for
         * the pagetable layout. */
        BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
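
/*
 * With SPARSEMEM_VMEMMAP, the struct page array is a single virtually
 * contiguous mapping.  The helpers below populate it one page_size chunk
 * at a time, backing each chunk with a physically contiguous allocation.
 */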

/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(vmemmap_section_start(start)))
                        return 1;

        return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */
#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        /* Create a PTE encoding without page size */
        unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
                _PAGE_KERNEL_RW;

        /* PTEs only contain page size encodings up to 32M */
        BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

        /* Encode the size in the PTE */
        flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

        /* For each PTE for that area, map things. Note that we don't
         * increment phys because all PTEs are of the large size and
         * thus must have the low bits clear
         */
        for (i = 0; i < page_size; i += PAGE_SIZE)
                BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        int mapped = htab_bolt_mapping(start, start + page_size, phys,
                                       PAGE_KERNEL, mmu_vmemmap_psize,
                                       mmu_kernel_ssize);
        BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */
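
/*
 * Each populated vmemmap chunk is recorded on a simple singly-linked
 * list so that the physical backing of any vmemmap address can be found
 * again later.  The vmemmap_backing entries themselves are carved out
 * of whole pages to avoid allocator overhead for these small structures.
 */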
struct vmemmap_backing *vmemmap_list;

static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
        static struct vmemmap_backing *next;
        static int num_left;

        /* allocate a page when required and hand out chunks */
        if (!next || !num_left) {
                next = vmemmap_alloc_block(PAGE_SIZE, node);
                if (unlikely(!next)) {
                        WARN_ON(1);
                        return NULL;
                }
                num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
        }

        num_left--;

        return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
                                            unsigned long start,
                                            int node)
{
        struct vmemmap_backing *vmem_back;

        vmem_back = vmemmap_list_alloc(node);
        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return;
        }

        vmem_back->phys = phys;
        vmem_back->virt_addr = start;
        vmem_back->list = vmemmap_list;

        vmemmap_list = vmem_back;
}
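
/*
 * Populate the vmemmap for nr_pages struct pages starting at start_page:
 * for each page_size chunk that is not yet populated, allocate a
 * physically contiguous block, record it on vmemmap_list and map it.
 */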
int __meminit vmemmap_populate(struct page *start_page,
                               unsigned long nr_pages, int node)
{
        unsigned long start = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + nr_pages);
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
                 start_page, nr_pages, node);
        pr_debug(" -> map %lx..%lx\n", start, end);

        for (; start < end; start += page_size) {
                void *p;

                if (vmemmap_populated(start, page_size))
                        continue;

                p = vmemmap_alloc_block(page_size, node);
                if (!p)
                        return -ENOMEM;

                vmemmap_list_populate(__pa(p), start, node);

                pr_debug(" * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                vmemmap_create_mapping(start, page_size, __pa(p));
        }

        return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */