memblock.c

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>
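
/*
 * x86 glue on top of the generic memblock allocator: early range
 * allocation, conversion of memblock reservations to bootmem (or, with
 * CONFIG_NO_BOOTMEM, directly to free ranges), and registration of
 * active regions in the early node map.
 */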

/*
 * Check for already reserved areas.  Trims [*addrp, *addrp + *sizep)
 * against memblock.reserved and returns true if the range had to be
 * changed, in which case the caller should retry with the updated
 * values.
 */
static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
{
        struct memblock_region *r;
        u64 addr = *addrp, last;
        u64 size = *sizep;
        bool changed = false;

again:
        last = addr + size;
        for_each_memblock(reserved, r) {
                if (last > r->base && addr < r->base) {
                        size = r->base - addr;
                        changed = true;
                        goto again;
                }
                if (last > (r->base + r->size) && addr < (r->base + r->size)) {
                        addr = round_up(r->base + r->size, align);
                        size = last - addr;
                        changed = true;
                        goto again;
                }
                if (last <= (r->base + r->size) && addr >= r->base) {
                        /* range is fully inside this reserved region */
                        (*sizep)++;
                        return false;
                }
        }

        if (changed) {
                *addrp = addr;
                *sizep = size;
        }
        return changed;
}
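
/*
 * Scan one memory region [ei_start, ei_last) for the first aligned
 * address at or above @start that does not collide with reserved
 * areas; the usable size is returned through @sizep.
 */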
static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last,
                                                    u64 start, u64 *sizep,
                                                    u64 align)
{
        u64 addr, last;

        addr = round_up(ei_start, align);
        if (addr < start)
                addr = round_up(start, align);
        if (addr >= ei_last)
                goto out;
        *sizep = ei_last - addr;
        while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
                ;
        last = addr + *sizep;
        if (last > ei_last)
                goto out;

        return addr;

out:
        return MEMBLOCK_ERROR;
}

/*
 * Find the next free range after @start; the size of the range is
 * returned in *sizep.
 */
u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
{
        struct memblock_region *r;

        for_each_memblock(memory, r) {
                u64 ei_start = r->base;
                u64 ei_last = ei_start + r->size;
                u64 addr;

                addr = __memblock_x86_find_in_range_size(ei_start, ei_last,
                                                         start, sizep, align);
                if (addr != MEMBLOCK_ERROR)
                        return addr;
        }

        return MEMBLOCK_ERROR;
}
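
/*
 * Illustrative (hypothetical) caller, not part of this file: early boot
 * code that wants the next free chunk and then pins it down might do
 *
 *	u64 size;
 *	u64 addr = memblock_x86_find_in_range_size(start, &size, PAGE_SIZE);
 *	if (addr != MEMBLOCK_ERROR)
 *		memblock_x86_reserve_range(addr, addr + size, "EARLY DATA");
 */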

static __init struct range *find_range_array(int count)
{
        u64 end, size, mem;
        struct range *range;

        size = sizeof(struct range) * count;
        end = memblock.current_limit;
        mem = memblock_find_in_range(0, end, size, sizeof(struct range));
        if (mem == MEMBLOCK_ERROR)
                panic("can not find more space for range array");

        /*
         * This range is temporary, so don't reserve it: it will not be
         * overlapped, because we will not allocate a new buffer before
         * we discard this one.
         */
        range = __va(mem);
        memset(range, 0, size);

        return range;
}

#ifdef CONFIG_NO_BOOTMEM
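/*
 * With CONFIG_NO_BOOTMEM the reserved regions are subtracted from the
 * range array directly and the resulting free ranges are handed to the
 * page allocator; otherwise (see the #else branch below) the
 * reservations are replayed into bootmem.
 */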

static void __init memblock_x86_subtract_reserved(struct range *range, int az)
{
        u64 final_start, final_end;
        struct memblock_region *r;

        /* Take out the region array itself first */
        memblock_free_reserved_regions();

        pr_info("Subtract (%ld early reservations)\n", memblock.reserved.cnt);

        for_each_memblock(reserved, r) {
                pr_info(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
                final_start = PFN_DOWN(r->base);
                final_end = PFN_UP(r->base + r->size);
                if (final_start >= final_end)
                        continue;
                subtract_range(range, az, final_start, final_end);
        }

        /* Put the region array back */
        memblock_reserve_reserved_regions();
}
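
/*
 * Helpers to count how many early_node_map[] entries belong to a node;
 * used below to size the temporary range array.
 */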
struct count_data {
        int nr;
};

static int __init count_work_fn(unsigned long start_pfn,
                                unsigned long end_pfn, void *datax)
{
        struct count_data *data = datax;

        data->nr++;
        return 0;
}

static int __init count_early_node_map(int nodeid)
{
        struct count_data data;

        data.nr = 0;
        work_with_active_regions(nodeid, count_work_fn, &data);

        return data.nr;
}
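
/*
 * Build the array of free memory ranges for @nodeid: start from the
 * node's active regions, then subtract everything memblock has
 * reserved.  Returns the number of ranges; the array itself is
 * returned via @rangep.
 */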
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
        int count;
        struct range *range;
        int nr_range;

        count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;

        range = find_range_array(count);
        nr_range = 0;

        /*
         * Use early_node_map[] and memblock.reserved.region to build
         * the range array first.
         */
        nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
#ifdef CONFIG_X86_32
        subtract_range(range, count, max_low_pfn, -1ULL);
#endif
        memblock_x86_subtract_reserved(range, count);
        nr_range = clean_sort_range(range, count);

        *rangep = range;
        return nr_range;
}
#else
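
/*
 * Bootmem is in use: replay the memblock reservations that fall inside
 * [start, end) into the bootmem allocator.
 */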
void __init memblock_x86_to_bootmem(u64 start, u64 end)
{
        int count;
        u64 final_start, final_end;
        struct memblock_region *r;

        /* Take out the region array itself first */
        memblock_free_reserved_regions();

        count = memblock.reserved.cnt;
        pr_info("(%d early reservations) ==> bootmem [%010llx-%010llx]\n",
                count, start, end - 1);

        for_each_memblock(reserved, r) {
                pr_info(" [%010llx-%010llx] ", (u64)r->base, (u64)r->base + r->size - 1);
                final_start = max(start, r->base);
                final_end = min(end, r->base + r->size);
                if (final_start >= final_end) {
                        pr_cont("\n");
                        continue;
                }
                pr_cont(" ==> [%010llx-%010llx]\n", final_start, final_end - 1);
                reserve_bootmem_generic(final_start, final_end - final_start,
                                        BOOTMEM_DEFAULT);
        }

        /* Put the region array back */
        memblock_reserve_reserved_regions();
}
#endif
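
/*
 * Thin range-based wrappers around memblock_reserve()/memblock_free():
 * they take [start, end) rather than base/size, tolerate empty ranges
 * and warn once on inverted ones.
 */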
void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
{
        if (start == end)
                return;

        if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx]\n", start, end))
                return;

        memblock_reserve(start, end - start);
}

void __init memblock_x86_free_range(u64 start, u64 end)
{
        if (start == end)
                return;

        if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx]\n", start, end))
                return;

        memblock_free(start, end - start);
}

/*
 * This function must be called after memblock_x86_register_active_regions(),
 * so that early_node_map[] is already filled in.
 */
u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
{
        u64 addr;

        addr = find_memory_core_early(nid, size, align, start, end);
        if (addr != MEMBLOCK_ERROR)
                return addr;

        /* Fallback: start/end should already be within the node's range */
        return memblock_find_in_range(start, end, size, align);
}

/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
 */
static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
                                                  unsigned long start_pfn,
                                                  unsigned long last_pfn,
                                                  unsigned long *ei_startpfn,
                                                  unsigned long *ei_endpfn)
{
        u64 align = PAGE_SIZE;

        *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
        *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;

        /* Skip map entries smaller than a page */
        if (*ei_startpfn >= *ei_endpfn)
                return 0;

        /* Skip if map is outside the node */
        if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
                return 0;

        /* Check for overlaps */
        if (*ei_startpfn < start_pfn)
                *ei_startpfn = start_pfn;
        if (*ei_endpfn > last_pfn)
                *ei_endpfn = last_pfn;

        return 1;
}

/* Walk the memblock.memory map and register active regions within a node */
void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
                                                 unsigned long last_pfn)
{
        unsigned long ei_startpfn;
        unsigned long ei_endpfn;
        struct memblock_region *r;

        for_each_memblock(memory, r)
                if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
                                                    &ei_startpfn, &ei_endpfn))
                        add_active_range(nid, ei_startpfn, ei_endpfn);
}
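
/*
 * Illustrative (hypothetical) use, not part of this file: a flat-memory
 * setup path could register all of RAM against node 0 with
 *
 *	memblock_x86_register_active_regions(0, 0, max_pfn);
 */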

/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 */
u64 __init memblock_x86_hole_size(u64 start, u64 end)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long last_pfn = end >> PAGE_SHIFT;
        unsigned long ei_startpfn, ei_endpfn, ram = 0;
        struct memblock_region *r;

        for_each_memblock(memory, r)
                if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
                                                    &ei_startpfn, &ei_endpfn))
                        ram += ei_endpfn - ei_startpfn;

        return end - start - ((u64)ram << PAGE_SHIFT);
}
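
/*
 * Illustrative (hypothetical) use, not part of this file: NUMA setup
 * code can reject a proposed node span that is mostly hole with
 * something like
 *
 *	if (memblock_x86_hole_size(start, end) > (end - start) / 2)
 *		return -1;
 */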