numa_32.c

/*
 * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
 * August 2002: added remote node KVA remap - Martin J. Bligh
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/pfn.h>
#include <linux/swap.h>
#include <linux/acpi.h>

#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/mmzone.h>
#include <asm/bios_ebda.h>
#include <asm/proto.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

/*
 * numa interface - we expect the numa architecture specific code to have
 * populated the following initialisation.
 *
 * 1) node_online_map - the map of all nodes configured (online) in the system
 * 2) node_start_pfn  - the starting page frame number for a node
 * 3) node_end_pfn    - the ending page frame number for a node
 */
unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly;
unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;

#ifdef CONFIG_DISCONTIGMEM
/*
 * 4) physnode_map    - the mapping between a pfn and owning node
 * physnode_map keeps track of the physical memory layout of a generic
 * numa node at a 64MB granularity (each element of the array
 * represents 64MB of memory and is marked with the owning node id).
 * So, if the first gig is on node 0 and the second gig is on node 1,
 * physnode_map will contain:
 *
 *     physnode_map[0-15] = 0;
 *     physnode_map[16-31] = 1;
 *     physnode_map[32- ] = -1;
 */
s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
EXPORT_SYMBOL(physnode_map);
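
/*
 * Illustrative lookup sketch (not part of the original file): this is how
 * a pfn resolves to its owning node through the table above.  The in-tree
 * helper that does effectively the same thing is pfn_to_nid() in
 * <asm/mmzone_32.h>; the name below is hypothetical.
 */
static inline int example_pfn_to_nid(unsigned long pfn)
{
        /* each element covers PAGES_PER_ELEMENT pfns; -1 means no node */
        return (int)physnode_map[pfn / PAGES_PER_ELEMENT];
}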

void memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        printk(KERN_INFO "Node: %d, start_pfn: %lx, end_pfn: %lx\n",
               nid, start, end);
        printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
        printk(KERN_DEBUG "  ");
        for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
                physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
                printk(KERN_CONT "%lx ", pfn);
        }
        printk(KERN_CONT "\n");
}

unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                     unsigned long end_pfn)
{
        unsigned long nr_pages = end_pfn - start_pfn;

        if (!nr_pages)
                return 0;

        return (nr_pages + 1) * sizeof(struct page);
}
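
/*
 * Worked example (illustrative; both figures below are config-dependent
 * assumptions, not guarantees): with 4 KiB pages, one 64 MiB element spans
 * 16384 pfns, so with a 32-byte struct page its memmap costs roughly
 * (16384 + 1) * 32 bytes, i.e. just over 512 KiB per element.
 */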

#endif

extern unsigned long find_max_low_pfn(void);
extern unsigned long highend_pfn, highstart_pfn;

#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)

static unsigned long node_remap_size[MAX_NUMNODES];
static void *node_remap_start_vaddr[MAX_NUMNODES];
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);

int __cpuinit numa_cpu_node(int cpu)
{
        return apic->x86_32_numa_cpu_node(cpu);
}

/*
 * FLAT - support for basic PC memory model with discontig enabled, essentially
 *        a single node with all available processors in it with a flat
 *        memory map.
 */
int __init get_memcfg_numa_flat(void)
{
        printk(KERN_DEBUG "NUMA - single node, flat memory mode\n");

        node_start_pfn[0] = 0;
        node_end_pfn[0] = max_pfn;
        memblock_x86_register_active_regions(0, 0, max_pfn);
        memory_present(0, 0, max_pfn);

        /* Indicate there is one node available. */
        nodes_clear(node_online_map);
        node_set_online(0);
        return 1;
}

/*
 * Find the highest page frame number we have available for the node
 */
static void __init propagate_e820_map_node(int nid)
{
        if (node_end_pfn[nid] > max_pfn)
                node_end_pfn[nid] = max_pfn;
        /*
         * if a user has given mem=XXXX, then we need to make sure
         * that the node _starts_ before that, too, not just ends
         */
        if (node_start_pfn[nid] > max_pfn)
                node_start_pfn[nid] = max_pfn;
        BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]);
}

/*
 * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
 * method.  For node zero take this from the bottom of memory, for
 * subsequent nodes place them at node_remap_start_vaddr which contains
 * node local data in physically node local memory.  See setup_memory()
 * for details.
 */
static void __init allocate_pgdat(int nid)
{
        char buf[16];

        NODE_DATA(nid) = alloc_remap(nid, ALIGN(sizeof(pg_data_t), PAGE_SIZE));
        if (!NODE_DATA(nid)) {
                unsigned long pgdat_phys;
                pgdat_phys = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
                                                    max_pfn_mapped << PAGE_SHIFT,
                                                    sizeof(pg_data_t),
                                                    PAGE_SIZE);
                NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys >> PAGE_SHIFT));
                memset(buf, 0, sizeof(buf));
                sprintf(buf, "NODE_DATA %d", nid);
                memblock_x86_reserve_range(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf);
        }
        printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n",
               nid, (unsigned long)NODE_DATA(nid));
}

/*
 * In the DISCONTIGMEM and SPARSEMEM memory model, a portion of the kernel
 * virtual address space (KVA) is reserved and portions of nodes are mapped
 * using it.  This is to allow node-local memory to be allocated for
 * structures that would normally require ZONE_NORMAL.  The memory is
 * allocated with alloc_remap() and callers should be prepared to allocate
 * from the bootmem allocator instead.
 */
static unsigned long node_remap_start_pfn[MAX_NUMNODES];
static void *node_remap_end_vaddr[MAX_NUMNODES];
static void *node_remap_alloc_vaddr[MAX_NUMNODES];

void *alloc_remap(int nid, unsigned long size)
{
        void *allocation = node_remap_alloc_vaddr[nid];

        size = ALIGN(size, L1_CACHE_BYTES);

        if (!allocation || (allocation + size) > node_remap_end_vaddr[nid])
                return NULL;

        node_remap_alloc_vaddr[nid] += size;
        memset(allocation, 0, size);

        return allocation;
}
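
/*
 * Caller sketch (illustrative, not part of the original file): prefer the
 * node-local remap area and fall back to bootmem, which is the pattern the
 * comment above asks of alloc_remap() users.  node_local_alloc_sketch() is
 * a hypothetical name.
 */
static void *__init node_local_alloc_sketch(int nid, unsigned long size)
{
        void *p = alloc_remap(nid, size);       /* node-local KVA remap area */

        if (!p)                                 /* remap area absent or full */
                p = alloc_bootmem_node(NODE_DATA(nid), size);
        return p;
}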

#ifdef CONFIG_HIBERNATION
/**
 * resume_map_numa_kva - add KVA mapping to the temporary page tables created
 *                       during resume from hibernation
 * @pgd_base - temporary resume page directory
 */
void resume_map_numa_kva(pgd_t *pgd_base)
{
        int node;

        for_each_online_node(node) {
                unsigned long start_va, start_pfn, size, pfn;

                start_va = (unsigned long)node_remap_start_vaddr[node];
                start_pfn = node_remap_start_pfn[node];
                size = node_remap_size[node];

                printk(KERN_DEBUG "%s: node %d\n", __func__, node);

                for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
                        unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
                        pgd_t *pgd = pgd_base + pgd_index(vaddr);
                        pud_t *pud = pud_offset(pgd, vaddr);
                        pmd_t *pmd = pmd_offset(pud, vaddr);

                        set_pmd(pmd, pfn_pmd(start_pfn + pfn,
                                             PAGE_KERNEL_LARGE_EXEC));

                        printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
                               __func__, vaddr, start_pfn + pfn);
                }
        }
}
#endif

static __init void init_alloc_remap(int nid)
{
        unsigned long size, pfn;
        u64 node_pa, remap_pa;
        void *remap_va;

        /*
         * The acpi/srat node info can show hot-add memory zones where
         * memory could be added but not currently present.
         */
        printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
               nid, node_start_pfn[nid], node_end_pfn[nid]);
        if (node_start_pfn[nid] > max_pfn)
                return;
        if (!node_end_pfn[nid])
                return;
        if (node_end_pfn[nid] > max_pfn)
                node_end_pfn[nid] = max_pfn;

        /* calculate the necessary space aligned to large page size */
        size = node_memmap_size_bytes(nid, node_start_pfn[nid],
                                      min(node_end_pfn[nid], max_pfn));
        size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
        size = ALIGN(size, LARGE_PAGE_BYTES);

        /* allocate node memory and the lowmem remap area */
        node_pa = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
                                         (u64)node_end_pfn[nid] << PAGE_SHIFT,
                                         size, LARGE_PAGE_BYTES);
        if (node_pa == MEMBLOCK_ERROR) {
                pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
                           size, nid);
                return;
        }
        memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");

        remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
                                          max_low_pfn << PAGE_SHIFT,
                                          size, LARGE_PAGE_BYTES);
        if (remap_pa == MEMBLOCK_ERROR) {
                pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
                           size, nid);
                memblock_x86_free_range(node_pa, node_pa + size);
                return;
        }
        memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
        remap_va = phys_to_virt(remap_pa);

        /* perform actual remap */
        for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE)
                set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT),
                            (node_pa >> PAGE_SHIFT) + pfn,
                            PAGE_KERNEL_LARGE);

        /* initialize remap allocator parameters */
        node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
        node_remap_size[nid] = size >> PAGE_SHIFT;
        node_remap_start_vaddr[nid] = remap_va;
        node_remap_end_vaddr[nid] = remap_va + size;
        node_remap_alloc_vaddr[nid] = remap_va;

        printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n",
               nid, node_pa, node_pa + size, remap_va, remap_va + size);
}

void __init initmem_init(void)
{
        int nid;

        /*
         * When mapping a NUMA machine we allocate the node_mem_map arrays
         * from node local memory.  They are then mapped directly into KVA
         * between zone normal and vmalloc space.  Calculate the size of
         * this space and use it to adjust the boundary between ZONE_NORMAL
         * and ZONE_HIGHMEM.
         */
        get_memcfg_numa();
        numa_init_array();

        for_each_online_node(nid)
                init_alloc_remap(nid);

#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
               pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
               pages_to_mb(max_low_pfn));
        printk(KERN_DEBUG "max_low_pfn = %lx, highstart_pfn = %lx\n",
               max_low_pfn, highstart_pfn);

        printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n",
               (ulong) pfn_to_kaddr(max_low_pfn));
        for_each_online_node(nid)
                allocate_pgdat(nid);

        printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
               (ulong) pfn_to_kaddr(highstart_pfn));
        for_each_online_node(nid)
                propagate_e820_map_node(nid);

        for_each_online_node(nid) {
                memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
                NODE_DATA(nid)->node_id = nid;
        }

        setup_bootmem_allocator();
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int paddr_to_nid(u64 addr)
{
        int nid;
        unsigned long pfn = PFN_DOWN(addr);

        for_each_node(nid)
                if (node_start_pfn[nid] <= pfn &&
                    pfn < node_end_pfn[nid])
                        return nid;

        return -1;
}

/*
 * This function is used to ask node id BEFORE memmap and mem_section's
 * initialization (pfn_to_nid() can't be used yet).
 * If _PXM is not defined on ACPI's DSDT, node id must be found by this.
 */
int memory_add_physaddr_to_nid(u64 addr)
{
        int nid = paddr_to_nid(addr);
        return (nid >= 0) ? nid : 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
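
/*
 * Usage sketch (illustrative only; hotadd_probe_sketch() and its argument
 * are hypothetical): an early hot-add path can resolve the target node
 * from a physical address before the memmap exists, with node 0 returned
 * when the range matches no node.
 */
static int __init hotadd_probe_sketch(u64 start)
{
        int nid = memory_add_physaddr_to_nid(start);

        pr_debug("hot-add probe: %016llx -> node %d\n",
                 (unsigned long long)start, nid);
        return nid;
}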

#endif