discontig_32.c

/*
 * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
 * August 2002: added remote node KVA remap - Martin J. Bligh
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/pfn.h>
#include <linux/swap.h>
#include <linux/acpi.h>

#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/mmzone.h>
#include <asm/bios_ebda.h>
#include <asm/proto.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
static bootmem_data_t node0_bdata;
/*
 * numa interface - we expect the numa architecture specific code to have
 * populated the following initialisation:
 *
 * 1) node_online_map - the map of all nodes configured (online) in the system
 * 2) node_start_pfn  - the starting page frame number for a node
 * 3) node_end_pfn    - the ending page frame number for a node
 */
unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly;
unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;
#ifdef CONFIG_DISCONTIGMEM
/*
 * 4) physnode_map    - the mapping between a pfn and owning node
 * physnode_map keeps track of the physical memory layout of a generic
 * numa node at a 64Mb granularity (each element of the array represents
 * 64Mb of memory and is marked with the owning node id). So, if the
 * first gig is on node 0 and the second gig is on node 1,
 * physnode_map will contain:
 *
 *     physnode_map[0-15]  = 0;
 *     physnode_map[16-31] = 1;
 *     physnode_map[32- ]  = -1;
 */
s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
EXPORT_SYMBOL(physnode_map);
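
/*
 * For illustration: the consumer side of physnode_map is a plain array
 * lookup. A minimal sketch of that helper follows (the real definition
 * of this era lives in asm/mmzone_32.h; shown here for reference only):
 */
#if 0	/* example only, not part of this file */
static inline int pfn_to_nid(unsigned long pfn)
{
#ifdef CONFIG_NUMA
        return (int)physnode_map[pfn / PAGES_PER_ELEMENT];
#else
        return 0;
#endif
}
#endif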
void memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        printk(KERN_INFO "Node: %d, start_pfn: %ld, end_pfn: %ld\n",
                        nid, start, end);
        printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
        printk(KERN_DEBUG "  ");
        for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
                physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
                printk(KERN_CONT "%ld ", pfn);
        }
        printk(KERN_CONT "\n");
}
unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                     unsigned long end_pfn)
{
        unsigned long nr_pages = end_pfn - start_pfn;

        if (!nr_pages)
                return 0;

        return (nr_pages + 1) * sizeof(struct page);
}
#endif
extern unsigned long find_max_low_pfn(void);
extern unsigned long highend_pfn, highstart_pfn;

#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)

unsigned long node_remap_size[MAX_NUMNODES];
static void *node_remap_start_vaddr[MAX_NUMNODES];
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);

static unsigned long kva_start_pfn;
static unsigned long kva_pages;

/*
 * FLAT - support for basic PC memory model with discontig enabled, essentially
 *        a single node with all available processors in it with a flat
 *        memory map.
 */
int __init get_memcfg_numa_flat(void)
{
        printk("NUMA - single node, flat memory mode\n");

        node_start_pfn[0] = 0;
        node_end_pfn[0] = max_pfn;
        e820_register_active_regions(0, 0, max_pfn);
        memory_present(0, 0, max_pfn);
        node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);

        /* Indicate there is one node available. */
        nodes_clear(node_online_map);
        node_set_online(0);
        return 1;
}
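
/*
 * For context: get_memcfg_numa() (defined in asm/mmzone_32.h in this
 * era) tries the platform-specific detection paths first and falls back
 * to the flat single-node setup above. Roughly, as a sketch (exact
 * config guards may differ by version):
 */
#if 0	/* example only */
static inline void get_memcfg_numa(void)
{
#ifdef CONFIG_X86_NUMAQ
        if (get_memcfg_numaq())
                return;
#elif defined(CONFIG_ACPI_SRAT)
        if (get_memcfg_from_srat())
                return;
#endif
        get_memcfg_numa_flat();
}
#endif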
/*
 * Find the highest page frame number we have available for the node
 */
static void __init propagate_e820_map_node(int nid)
{
        if (node_end_pfn[nid] > max_pfn)
                node_end_pfn[nid] = max_pfn;
        /*
         * if a user has given mem=XXXX, then we need to make sure
         * that the node _starts_ before that, too, not just ends
         */
        if (node_start_pfn[nid] > max_pfn)
                node_start_pfn[nid] = max_pfn;
        BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]);
}
/*
 * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
 * method. For node zero it is taken from the bottom of memory; for
 * subsequent nodes it is placed at node_remap_start_vaddr, which provides
 * node-local data in physically node-local memory. See setup_memory()
 * for details.
 */
static void __init allocate_pgdat(int nid)
{
        if (nid && node_has_online_mem(nid) && node_remap_start_vaddr[nid])
                NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
        else {
                unsigned long pgdat_phys;

                pgdat_phys = find_e820_area(min_low_pfn<<PAGE_SHIFT,
                                (nid ? max_low_pfn : max_pfn_mapped)<<PAGE_SHIFT,
                                sizeof(pg_data_t),
                                PAGE_SIZE);
                NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT));
                reserve_early(pgdat_phys, pgdat_phys + sizeof(pg_data_t),
                              "NODE_DATA");
        }
        printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n",
                nid, (unsigned long)NODE_DATA(nid));
}
/*
 * In the DISCONTIGMEM and SPARSEMEM memory model, a portion of the kernel
 * virtual address space (KVA) is reserved and portions of nodes are mapped
 * using it. This is to allow node-local memory to be allocated for
 * structures that would normally require ZONE_NORMAL. The memory is
 * allocated with alloc_remap() and callers should be prepared to allocate
 * from the bootmem allocator instead.
 */
static unsigned long node_remap_start_pfn[MAX_NUMNODES];
static void *node_remap_end_vaddr[MAX_NUMNODES];
static void *node_remap_alloc_vaddr[MAX_NUMNODES];
static unsigned long node_remap_offset[MAX_NUMNODES];

void *alloc_remap(int nid, unsigned long size)
{
        void *allocation = node_remap_alloc_vaddr[nid];

        size = ALIGN(size, L1_CACHE_BYTES);

        if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
                return NULL;

        node_remap_alloc_vaddr[nid] += size;
        memset(allocation, 0, size);

        return allocation;
}
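
/*
 * For illustration: the expected caller pattern is "try the remap area,
 * fall back to bootmem". A minimal sketch of how a node's mem_map is
 * sized and allocated by the core mm code of this era (reference only;
 * size/map/pgdat/start/end are the caller's locals):
 */
#if 0	/* example only */
        size = (end - start) * sizeof(struct page);
        map = alloc_remap(pgdat->node_id, size);
        if (!map)
                map = alloc_bootmem_node(pgdat, size);
#endif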
void __init remap_numa_kva(void)
{
        void *vaddr;
        unsigned long pfn;
        int node;

        for_each_online_node(node) {
                printk(KERN_DEBUG "remap_numa_kva: node %d\n", node);
                for (pfn = 0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
                        vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
                        printk(KERN_DEBUG "remap_numa_kva: %08lx to pfn %08lx\n",
                                (unsigned long)vaddr,
                                node_remap_start_pfn[node] + pfn);
                        set_pmd_pfn((ulong) vaddr,
                                node_remap_start_pfn[node] + pfn,
                                PAGE_KERNEL_LARGE);
                }
        }
}
static unsigned long calculate_numa_remap_pages(void)
{
        int nid;
        unsigned long size, reserve_pages = 0;

        for_each_online_node(nid) {
                u64 node_kva_target;
                u64 node_kva_final;

                /*
                 * The acpi/srat node info can show hot-add memory zones
                 * where memory could be added but is not currently present.
                 */
                printk("node %d pfn: [%lx - %lx]\n",
                        nid, node_start_pfn[nid], node_end_pfn[nid]);
                if (node_start_pfn[nid] > max_pfn)
                        continue;
                if (!node_end_pfn[nid])
                        continue;
                if (node_end_pfn[nid] > max_pfn)
                        node_end_pfn[nid] = max_pfn;

                /* ensure the remap includes space for the pgdat. */
                size = node_remap_size[nid] + sizeof(pg_data_t);

                /* convert size to large (pmd size) pages, rounding up */
                size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
                /* now the roundup is correct, convert to PAGE_SIZE pages */
                size = size * PTRS_PER_PTE;

                node_kva_target = round_down(node_end_pfn[nid] - size,
                                                 PTRS_PER_PTE);
                node_kva_target <<= PAGE_SHIFT;
                do {
                        node_kva_final = find_e820_area(node_kva_target,
                                        ((u64)node_end_pfn[nid])<<PAGE_SHIFT,
                                        ((u64)size)<<PAGE_SHIFT,
                                        LARGE_PAGE_BYTES);
                        node_kva_target -= LARGE_PAGE_BYTES;
                } while (node_kva_final == -1ULL &&
                         (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));

                if (node_kva_final == -1ULL)
                        panic("Can not get kva ram\n");

                node_remap_size[nid] = size;
                node_remap_offset[nid] = reserve_pages;
                reserve_pages += size;
                printk("Reserving %ld pages of KVA for lmem_map of node %d at %llx\n",
                        size, nid, node_kva_final>>PAGE_SHIFT);

                /*
                 * Prevent the KVA RAM from ending up below max_low_pfn on
                 * a system using less memory later; the layout will be:
                 * KVA address, then KVA RAM.
                 *
                 * We are supposed to record only ranges below max_low_pfn,
                 * but there may be holes in high memory, and the check
                 * page_is_ram(pfn) && !page_is_reserved_early(pfn) is all
                 * that decides whether a page is treated as free. So
                 * reserve_early() the range here and hope we don't run out
                 * of entries in that array.
                 */
                reserve_early(node_kva_final,
                              node_kva_final+(((u64)size)<<PAGE_SHIFT),
                              "KVA RAM");

                node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
                remove_active_range(nid, node_remap_start_pfn[nid],
                                         node_remap_start_pfn[nid] + size);
        }
        printk("Reserving total of %ld pages for numa KVA remap\n",
                        reserve_pages);
        return reserve_pages;
}
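
/*
 * Worked example of the sizing above (assuming a non-PAE 32-bit build,
 * so PTRS_PER_PTE = 1024 and LARGE_PAGE_BYTES = 4MB, and roughly a
 * 32-byte struct page): a node with 1GiB of memory spans 262144 pfns,
 * so its memmap needs about 262144 * 32 bytes = 8MiB, plus one
 * pg_data_t. That rounds up to 3 large pages, i.e. size = 3 * 1024 =
 * 3072 PAGE_SIZE pages of remap space reserved for that node.
 */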
static void init_remap_allocator(int nid)
{
        node_remap_start_vaddr[nid] = pfn_to_kaddr(
                        kva_start_pfn + node_remap_offset[nid]);
        node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
                (node_remap_size[nid] * PAGE_SIZE);
        node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
                ALIGN(sizeof(pg_data_t), PAGE_SIZE);

        printk("node %d will remap to vaddr %08lx - %08lx\n", nid,
                (ulong) node_remap_start_vaddr[nid],
                (ulong) node_remap_end_vaddr[nid]);
}
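
/*
 * Resulting layout of one node's remap window, for reference (follows
 * from init_remap_allocator() above and allocate_pgdat() earlier):
 *
 *   node_remap_start_vaddr[nid]              -> pg_data_t for nid
 *   + ALIGN(sizeof(pg_data_t), PAGE_SIZE)    -> alloc_remap() arena,
 *                                               grows upward
 *   node_remap_end_vaddr[nid]                -> end of the window
 */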
unsigned long __init initmem_init(unsigned long start_pfn,
                                  unsigned long end_pfn)
{
        int nid;
        unsigned long system_start_pfn, system_max_low_pfn;
        long kva_target_pfn;

        /*
         * When mapping a NUMA machine we allocate the node_mem_map arrays
         * from node local memory. They are then mapped directly into KVA
         * between zone normal and vmalloc space. Calculate the size of
         * this space and use it to adjust the boundary between ZONE_NORMAL
         * and ZONE_HIGHMEM.
         */

        /* call find_max_low_pfn() first; it may update max_pfn */
        system_max_low_pfn = max_low_pfn = find_max_low_pfn();

        remove_all_active_ranges();
        get_memcfg_numa();

        kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE);

        /* partially used pages are not usable - thus round upwards */
        system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);

        kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
        do {
                kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT,
                                        max_low_pfn<<PAGE_SHIFT,
                                        kva_pages<<PAGE_SHIFT,
                                        PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
                kva_target_pfn -= PTRS_PER_PTE;
        } while (kva_start_pfn == -1UL && kva_target_pfn > min_low_pfn);

        if (kva_start_pfn == -1UL)
                panic("Can not get kva space\n");

        printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
                kva_start_pfn, max_low_pfn);
        printk("max_pfn = %ld\n", max_pfn);

        /* avoid clash with initrd */
        reserve_early(kva_start_pfn<<PAGE_SHIFT,
                      (kva_start_pfn + kva_pages)<<PAGE_SHIFT,
                      "KVA PG");
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > system_max_low_pfn)
                highstart_pfn = system_max_low_pfn;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
               pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        num_physpages = system_max_low_pfn;
        high_memory = (void *) __va(system_max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(system_max_low_pfn));
        printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n",
                        min_low_pfn, max_low_pfn, highstart_pfn);

        printk("Low memory ends at vaddr %08lx\n",
                        (ulong) pfn_to_kaddr(max_low_pfn));
        for_each_online_node(nid) {
                init_remap_allocator(nid);

                allocate_pgdat(nid);
        }
        printk("High memory starts at vaddr %08lx\n",
                        (ulong) pfn_to_kaddr(highstart_pfn));
        for_each_online_node(nid)
                propagate_e820_map_node(nid);

        memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
        NODE_DATA(0)->bdata = &node0_bdata;
        setup_bootmem_allocator();
        return max_low_pfn;
}
void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif
        free_area_init_nodes(max_zone_pfns);
}
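
/*
 * For reference, on a typical i386 box this produces the usual 32-bit
 * zone layout (assuming the conventional ~896MB lowmem split):
 *
 *   ZONE_DMA      pfn 0       .. ~16MB         (ISA DMA limit)
 *   ZONE_NORMAL   ~16MB       .. max_low_pfn   (~896MB)
 *   ZONE_HIGHMEM  max_low_pfn .. highend_pfn   (if CONFIG_HIGHMEM)
 */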
void __init set_highmem_pages_init(void)
{
#ifdef CONFIG_HIGHMEM
        struct zone *zone;
        int nid;

        for_each_zone(zone) {
                unsigned long zone_start_pfn, zone_end_pfn;

                if (!is_highmem(zone))
                        continue;

                zone_start_pfn = zone->zone_start_pfn;
                zone_end_pfn = zone_start_pfn + zone->spanned_pages;

                nid = zone_to_nid(zone);
                printk("Initializing %s for node %d (%08lx:%08lx)\n",
                                zone->name, nid, zone_start_pfn, zone_end_pfn);

                add_highpages_with_active_regions(nid, zone_start_pfn,
                                zone_end_pfn);
        }
        totalram_pages += totalhigh_pages;
#endif
}
#ifdef CONFIG_MEMORY_HOTPLUG
static int paddr_to_nid(u64 addr)
{
        int nid;
        unsigned long pfn = PFN_DOWN(addr);

        for_each_node(nid)
                if (node_start_pfn[nid] <= pfn &&
                    pfn < node_end_pfn[nid])
                        return nid;

        return -1;
}

/*
 * This function is used to look up the node id BEFORE the memmap and
 * mem_sections are initialized (pfn_to_nid() cannot be used yet).
 * If _PXM is not defined in the ACPI DSDT, the node id must be found
 * this way.
 */
int memory_add_physaddr_to_nid(u64 addr)
{
        int nid = paddr_to_nid(addr);
        return (nid >= 0) ? nid : 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#if defined(CONFIG_ACPI_NUMA) && !defined(CONFIG_HAVE_ARCH_PARSE_SRAT)
/*
 * Dummy on 32-bit, for now:
 */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
}

void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
}

void __init acpi_numa_arch_fixup(void)
{
}
#endif