  1. /*
  2. * sparse memory mappings.
  3. */
  4. #include <linux/config.h>
  5. #include <linux/mm.h>
  6. #include <linux/mmzone.h>
  7. #include <linux/bootmem.h>
  8. #include <linux/module.h>
  9. #include <asm/dma.h>
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 *
 * One entry per possible section; a section is "present" once its
 * section_mem_map field is non-zero (see memory_present() below).
 */
struct mem_section mem_section[NR_MEM_SECTIONS];
EXPORT_SYMBOL(mem_section);
  17. /* Record a memory area against a node. */
  18. void memory_present(int nid, unsigned long start, unsigned long end)
  19. {
  20. unsigned long pfn;
  21. start &= PAGE_SECTION_MASK;
  22. for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
  23. unsigned long section = pfn_to_section_nr(pfn);
  24. if (!mem_section[section].section_mem_map)
  25. mem_section[section].section_mem_map = SECTION_MARKED_PRESENT;
  26. }
  27. }
  28. /*
  29. * Only used by the i386 NUMA architecures, but relatively
  30. * generic code.
  31. */
  32. unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
  33. unsigned long end_pfn)
  34. {
  35. unsigned long pfn;
  36. unsigned long nr_pages = 0;
  37. for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
  38. if (nid != early_pfn_to_nid(pfn))
  39. continue;
  40. if (pfn_valid(pfn))
  41. nr_pages += PAGES_PER_SECTION;
  42. }
  43. return nr_pages * sizeof(struct page);
  44. }
  45. /*
  46. * Subtle, we encode the real pfn into the mem_map such that
  47. * the identity pfn - section_mem_map will return the actual
  48. * physical page frame number.
  49. */
  50. static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
  51. {
  52. return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
  53. }
  54. /*
  55. * We need this if we ever free the mem_maps. While not implemented yet,
  56. * this function is included for parity with its sibling.
  57. */
  58. static __attribute((unused))
  59. struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
  60. {
  61. return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
  62. }
  63. static int sparse_init_one_section(struct mem_section *ms,
  64. unsigned long pnum, struct page *mem_map)
  65. {
  66. if (!valid_section(ms))
  67. return -EINVAL;
  68. ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);
  69. return 1;
  70. }
  71. static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
  72. {
  73. struct page *map;
  74. int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
  75. map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
  76. if (map)
  77. return map;
  78. map = alloc_bootmem_node(NODE_DATA(nid),
  79. sizeof(struct page) * PAGES_PER_SECTION);
  80. if (map)
  81. return map;
  82. printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
  83. mem_section[pnum].section_mem_map = 0;
  84. return NULL;
  85. }
  86. /*
  87. * Allocate the accumulated non-linear sections, allocate a mem_map
  88. * for each and record the physical to section mapping.
  89. */
  90. void sparse_init(void)
  91. {
  92. unsigned long pnum;
  93. struct page *map;
  94. for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
  95. if (!valid_section_nr(pnum))
  96. continue;
  97. map = sparse_early_mem_map_alloc(pnum);
  98. if (map)
  99. sparse_init_one_section(&mem_section[pnum], pnum, map);
  100. }
  101. }
  102. /*
  103. * returns the number of sections whose mem_maps were properly
  104. * set. If this is <=0, then that means that the passed-in
  105. * map was not consumed and must be freed.
  106. */
  107. int sparse_add_one_section(unsigned long start_pfn, int nr_pages, struct page *map)
  108. {
  109. struct mem_section *ms = __pfn_to_section(start_pfn);
  110. if (ms->section_mem_map & SECTION_MARKED_PRESENT)
  111. return -EEXIST;
  112. ms->section_mem_map |= SECTION_MARKED_PRESENT;
  113. return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
  114. }