/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);
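
/*
 * Record which node owns @section_nr so that page_to_nid() can look
 * it up later.
 */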
static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
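/*
 * Allocate one root's worth of mem_section structures on @nid: from
 * the slab once it is available, otherwise from bootmem.
 */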
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available())
                section = kmalloc_node(array_size, GFP_KERNEL, nid);
        else
                section = alloc_bootmem_node(NODE_DATA(nid), array_size);

        if (section)
                memset(section, 0, array_size);

        return section;
}
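
/*
 * Make sure the root array entry covering @section_nr is populated.
 * Returns 0 on success, -EEXIST if the root was already set up, and
 * -ENOMEM if the allocation failed.
 */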
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        static DEFINE_SPINLOCK(index_init_lock);
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
        int ret = 0;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;
        /*
         * This lock keeps two different sections from
         * reallocating for the same index
         */
        spin_lock(&index_init_lock);

        if (mem_section[root]) {
                ret = -EEXIST;
                goto out;
        }

        mem_section[root] = section;
out:
        spin_unlock(&index_init_lock);
        return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
        unsigned long root_nr;
        struct mem_section *root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
        unsigned long pfn;

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (start >= max_arch_pfn)
                return;
        if (end >= max_arch_pfn)
                end = max_arch_pfn;

        start &= PAGE_SECTION_MASK;
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_MARKED_PRESENT;
        }
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                            unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
static struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
                                          unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
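
/*
 * Hook the encoded mem_map and the pageblock bitmap into @ms and mark
 * the section as having a mem_map.  Returns 1 on success, -EINVAL if
 * the section was never marked present.
 */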
static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}
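
/*
 * Size, in bytes, of one section's pageblock flags bitmap, rounded up
 * to a whole number of unsigned longs.
 */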
static unsigned long usemap_size(void)
{
        unsigned long size_bytes;
        size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
        size_bytes = roundup(size_bytes, sizeof(unsigned long));
        return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
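
/*
 * Boot-time allocation of a section's usemap from bootmem on the
 * section's own node.
 */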
static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
        unsigned long *usemap;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
        if (usemap)
                return usemap;

        /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
        nid = 0;

        printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
        return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
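/*
 * Allocate the mem_map for one section on @nid, preferring the
 * architecture's remapped area via alloc_remap() and falling back to
 * bootmem.
 */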
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        map = alloc_bootmem_node(NODE_DATA(nid),
                        sizeof(struct page) * PAGES_PER_SECTION);
        return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
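
/*
 * Populate the mem_map for a section at boot; on failure the section
 * is marked not present again so that it is simply dropped.
 */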
struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;

        printk(KERN_ERR "%s: sparsemem memory map backing failed "
                        "some memory will not be available.\n", __FUNCTION__);
        ms->section_mem_map = 0;
        return NULL;
}

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size;

        /*
         * The mem_map is allocated as a large page (2M on 64-bit x86)
         * while each usemap is far smaller (about 24 bytes).  If the
         * two allocations were interleaved, every tiny usemap would
         * push the next 2M-aligned mem_map out to the following 2M
         * boundary, riddling a big system's memory with holes.  So
         * allocate all of the usemaps first, keeping the 2M mem_map
         * allocations contiguous.
         *
         * powerpc needs to call sparse_init_one_section() right after
         * each sparse_early_mem_map_alloc(), so usemap_map must be
         * allocated first.
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
        usemap_map = alloc_bootmem(size);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;
                usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
        }

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;

                usemap = usemap_map[pnum];
                if (!usemap)
                        continue;

                map = sparse_early_mem_map_alloc(pnum);
                if (!map)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                                                usemap);
        }

        vmemmap_populate_print_last();

        free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                  unsigned long nr_pages)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        return; /* XXX: Not implemented yet */
}
#else
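/*
 * Allocate a hot-added section's mem_map, trying the page allocator
 * first and falling back to vmalloc() when contiguous pages cannot
 * be had.
 */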
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * nr_pages;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
        memset(ret, 0, memmap_size);

        return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                  unsigned long nr_pages)
{
        return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * nr_pages));
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
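
/*
 * Free a removed section's usemap and mem_map, but only if they came
 * from the slab (i.e. from a hot-add); bootmem allocations are leaked
 * for now.
 */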
static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
        if (!usemap)
                return;

        /*
         * Check to see if allocation came from hot-plug-add
         */
        if (PageSlab(virt_to_page(usemap))) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap, PAGES_PER_SECTION);
                return;
        }

        /*
         * TODO: Allocations came from bootmem - how do I free up?
         */
        printk(KERN_WARNING "Not freeing up allocations from bootmem "
                        "- leaking memory\n");
}

/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, then the passed-in map was not consumed and must
 * be freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                           int nr_pages)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * No locking here: sparse_index_init() does its own locking,
         * and it may kmalloc().
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap, nr_pages);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap, nr_pages);
        }
        return ret;
}
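
/*
 * Tear down one section: detach its mem_map and usemap, clear the
 * section entry, then free whatever can be freed.
 */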
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL;

        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                                __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }

        free_section_usemap(memmap, usemap);
}
#endif