memblock.c

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>

static __init struct range *find_range_array(int count)
{
	u64 end, size, mem;
	struct range *range;

	size = sizeof(struct range) * count;
	end = memblock.current_limit;

	mem = memblock_find_in_range(0, end, size, sizeof(struct range));
	if (!mem)
		panic("can not find more space for range array");

	/*
	 * This range array is temporary, so don't reserve it: it will not
	 * be overlapped, because we will not allocate a new buffer before
	 * we discard this one.
	 */
	range = __va(mem);
	memset(range, 0, size);

	return range;
}
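
/*
 * Hypothetical sketch (not part of the original file): callers size the
 * temporary array for the worst case, where every subtract_range() call
 * can split an existing range in two -- hence the "* 2" in the callers
 * below. A minimal illustration of that sizing convention:
 */
static struct range * __init __maybe_unused example_alloc_range_array(void)
{
	/* one slot per memory/reserved region, doubled for possible splits */
	int count = (memblock.memory.cnt + memblock.reserved.cnt) * 2;

	return find_range_array(count);
}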

static void __init memblock_x86_subtract_reserved(struct range *range, int az)
{
	u64 final_start, final_end;
	struct memblock_region *r;

	/* Take out the region array itself first */
	memblock_free_reserved_regions();

	memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt);

	for_each_memblock(reserved, r) {
		memblock_dbg("  [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
		final_start = PFN_DOWN(r->base);
		final_end = PFN_UP(r->base + r->size);
		if (final_start >= final_end)
			continue;
		subtract_range(range, az, final_start, final_end);
	}

	/* Put the region array back */
	memblock_reserve_reserved_regions();
}

static int __init count_early_node_map(int nodeid)
{
	int i, cnt = 0;

	for_each_mem_pfn_range(i, nodeid, NULL, NULL, NULL)
		cnt++;
	return cnt;
}

int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
			 unsigned long start_pfn, unsigned long end_pfn)
{
	int count;
	struct range *range;
	int nr_range;

	count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;

	range = find_range_array(count);
	nr_range = 0;

	/*
	 * Use early_node_map[] and memblock.reserved.region to build the
	 * range array first.
	 */
	nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
	subtract_range(range, count, 0, start_pfn);
	subtract_range(range, count, end_pfn, -1ULL);

	memblock_x86_subtract_reserved(range, count);
	nr_range = clean_sort_range(range, count);

	*rangep = range;
	return nr_range;
}

int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
	unsigned long end_pfn = -1UL;

#ifdef CONFIG_X86_32
	end_pfn = max_low_pfn;
#endif
	return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
}
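
/*
 * Hypothetical usage sketch (not part of the original file): a boot-time
 * caller can fetch every free range on node 0 and total the pages. The
 * returned ranges hold page-frame numbers (struct range from
 * <linux/range.h>).
 */
static void __init __maybe_unused example_walk_free_ranges(void)
{
	struct range *range;
	int i, nr_range;
	u64 pages = 0;

	nr_range = get_free_all_memory_range(&range, 0);
	for (i = 0; i < nr_range; i++)
		pages += range[i].end - range[i].start;

	pr_info("node 0: %llu free pages in %d ranges\n",
		(unsigned long long)pages, nr_range);
}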

static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
{
	int i, count;
	struct range *range;
	int nr_range;
	u64 final_start, final_end;
	u64 free_size;
	struct memblock_region *r;

	count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;

	range = find_range_array(count);
	nr_range = 0;

	addr = PFN_UP(addr);
	limit = PFN_DOWN(limit);

	for_each_memblock(memory, r) {
		final_start = PFN_UP(r->base);
		final_end = PFN_DOWN(r->base + r->size);
		if (final_start >= final_end)
			continue;
		if (final_start >= limit || final_end <= addr)
			continue;

		nr_range = add_range(range, count, nr_range, final_start, final_end);
	}
	subtract_range(range, count, 0, addr);
	subtract_range(range, count, limit, -1ULL);

	/* Subtract memblock.reserved.region in range? */
	if (!get_free)
		goto sort_and_count_them;

	for_each_memblock(reserved, r) {
		final_start = PFN_DOWN(r->base);
		final_end = PFN_UP(r->base + r->size);
		if (final_start >= final_end)
			continue;
		if (final_start >= limit || final_end <= addr)
			continue;

		subtract_range(range, count, final_start, final_end);
	}

sort_and_count_them:
	nr_range = clean_sort_range(range, count);

	free_size = 0;
	for (i = 0; i < nr_range; i++)
		free_size += range[i].end - range[i].start;

	return free_size << PAGE_SHIFT;
}

u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
{
	return __memblock_x86_memory_in_range(addr, limit, true);
}

u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
{
	return __memblock_x86_memory_in_range(addr, limit, false);
}
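
/*
 * Hypothetical usage sketch (not part of the original file): compare
 * total and free RAM below 4 GiB, e.g. when deciding how much low
 * memory remains for DMA-limited allocations.
 */
static void __init __maybe_unused example_report_low_memory(void)
{
	u64 total = memblock_x86_memory_in_range(0, 1ULL << 32);
	u64 free = memblock_x86_free_memory_in_range(0, 1ULL << 32);

	pr_info("below 4G: %lluMB total, %lluMB free\n",
		(unsigned long long)(total >> 20),
		(unsigned long long)(free >> 20));
}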

void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
		return;

	memblock_dbg("    memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);

	memblock_reserve(start, end - start);
}

void __init memblock_x86_free_range(u64 start, u64 end)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
		return;

	memblock_dbg("       memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);

	memblock_free(start, end - start);
}
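
/*
 * Hypothetical usage sketch (not part of the original file): early x86
 * setup code reserves a firmware-owned physical byte range; the name
 * argument only feeds the debug output above. The range shown here (the
 * legacy VGA/BIOS hole on PC-compatibles) is illustrative.
 */
static void __init __maybe_unused example_reserve_bios_hole(void)
{
	memblock_x86_reserve_range(0xa0000, 0x100000, "* BIOS reserved");
}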

/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
 */
static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
				  unsigned long start_pfn,
				  unsigned long last_pfn,
				  unsigned long *ei_startpfn,
				  unsigned long *ei_endpfn)
{
	u64 align = PAGE_SIZE;

	*ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
	*ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;

	/* Skip map entries smaller than a page */
	if (*ei_startpfn >= *ei_endpfn)
		return 0;

	/* Skip if map is outside the node */
	if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
		return 0;

	/* Check for overlaps */
	if (*ei_startpfn < start_pfn)
		*ei_startpfn = start_pfn;
	if (*ei_endpfn > last_pfn)
		*ei_endpfn = last_pfn;

	return 1;
}

/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 */
u64 __init memblock_x86_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long last_pfn = end >> PAGE_SHIFT;
	unsigned long ei_startpfn, ei_endpfn, ram = 0;
	struct memblock_region *r;

	for_each_memblock(memory, r)
		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			ram += ei_endpfn - ei_startpfn;

	return end - start - ((u64)ram << PAGE_SHIFT);
}
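
/*
 * Hypothetical usage sketch (not part of the original file): a nonzero
 * hole size means part of [start, end) is not covered by any
 * memblock.memory region, so a caller can test coverage like this:
 */
static bool __init __maybe_unused example_range_fully_ram(u64 start, u64 end)
{
	return memblock_x86_hole_size(start, end) == 0;
}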