sparse-vmemmap.c

/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */

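/*
 * For illustration: with a virtual memory map, the pfn <-> page
 * conversions reduce to base-offset arithmetic against the global
 * vmemmap base, roughly as the generic memory model defines them
 * for SPARSEMEM_VMEMMAP (see include/asm-generic/memory_model.h):
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */
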
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __init_refok __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal);
}

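/*
 * Optional contiguous early-boot buffer, set up by
 * sparse_mem_maps_populate_node() below. While it is in place,
 * vmemmap_alloc_block_buf() carves allocations out of it instead of
 * going to the allocators for every block.
 */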
static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fall back to bootmem. */
	if (slab_is_available()) {
		struct page *page;

		if (node_state(node, N_HIGH_MEMORY))
			page = alloc_pages_node(
				node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		else
			page = alloc_pages(
				GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/*
 * During the early stage all callers must request the same size, so
 * that the ALIGN() below keeps the buffer position correctly aligned
 * for every allocation.
 */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* Take the allocation from the buffer. */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}

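/*
 * Warn when the memory backing this range of the virtual memory map
 * was allocated on a node distant from the node whose page structs it
 * describes.
 */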
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		printk(KERN_WARNING "[%lx-%lx] potential offnode "
			"page_structs\n", start, end - 1);
}

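/*
 * The helpers below fill in the kernel page table one level at a time
 * for a vmemmap address: each allocates the next-level table on @node
 * if it is not already present, so that a PAGE_SIZE backing page can
 * finally be installed at the pte level.
 */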
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

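/*
 * Generic population of the virtual memory map with base pages: walk
 * [start, end) in PAGE_SIZE steps and instantiate every level of the
 * mapping. Architectures that map the vmemmap with base pages can call
 * this from their vmemmap_populate().
 */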
int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

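/*
 * Back the struct page array for one sparsemem section with vmemmap
 * pages, returning its starting address, or NULL if the architecture
 * failed to populate the mapping.
 */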
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}

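/*
 * Populate the memory maps for a range of sections on one node. A
 * single PMD-aligned bootmem block is allocated up front to serve as
 * the allocation buffer, which lets the architecture back the vmemmap
 * with large pages; whatever is left of the buffer is returned to
 * bootmem at the end.
 */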
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
			 PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* Free the leftover, unused part of the buffer. */
		free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}