memblock.c

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>

static __init struct range *find_range_array(int count)
{
        u64 end, size, mem;
        struct range *range;

        size = sizeof(struct range) * count;
        end = memblock.current_limit;
        mem = memblock_find_in_range(0, end, size, sizeof(struct range));
        if (!mem)
                panic("cannot find more space for range array");

        /*
         * This range array is temporary, so don't reserve it: it will not
         * be clobbered, because we will not allocate a new buffer before
         * we discard this one.
         */
        range = __va(mem);
        memset(range, 0, size);

        return range;
}
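
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the intended life cycle of the array returned by find_range_array().
 * Because the backing memory is never reserved, a caller must finish
 * consuming the array before anything else allocates from memblock. The
 * capacity, the sample range, and the function name below are
 * hypothetical; add_range() is the real helper from kernel/range.c.
 */
static void __init example_range_array_lifecycle(void)
{
        int count = 16;                 /* hypothetical capacity */
        int nr_range = 0;
        struct range *range = find_range_array(count);

        /* Fill the array; [0x100, 0x200) is an arbitrary sample in PFNs. */
        nr_range = add_range(range, count, nr_range, 0x100, 0x200);

        /*
         * ... consume range[0..nr_range) here, then simply drop the array:
         * there is nothing to free, since the memory was never reserved.
         */
}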

static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
{
        int i, count;
        struct range *range;
        int nr_range;
        u64 final_start, final_end;
        u64 free_size;
        struct memblock_region *r;

        count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;
        range = find_range_array(count);
        nr_range = 0;

        addr = PFN_UP(addr);
        limit = PFN_DOWN(limit);

        for_each_memblock(memory, r) {
                final_start = PFN_UP(r->base);
                final_end = PFN_DOWN(r->base + r->size);
                if (final_start >= final_end)
                        continue;
                if (final_start >= limit || final_end <= addr)
                        continue;

                nr_range = add_range(range, count, nr_range, final_start, final_end);
        }
        subtract_range(range, count, 0, addr);
        subtract_range(range, count, limit, -1ULL);

        /* Subtract the memblock.reserved regions that overlap the range? */
        if (!get_free)
                goto sort_and_count_them;

        for_each_memblock(reserved, r) {
                final_start = PFN_DOWN(r->base);
                final_end = PFN_UP(r->base + r->size);
                if (final_start >= final_end)
                        continue;
                if (final_start >= limit || final_end <= addr)
                        continue;

                subtract_range(range, count, final_start, final_end);
        }

sort_and_count_them:
        nr_range = clean_sort_range(range, count);

        free_size = 0;
        for (i = 0; i < nr_range; i++)
                free_size += range[i].end - range[i].start;

        return free_size << PAGE_SHIFT;
}
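
/*
 * Worked example (editor's illustration, hypothetical numbers): with one
 * memory region [0x1000, 0x5000), one reserved region [0x2000, 0x3000),
 * and PAGE_SIZE == 0x1000, a get_free query over [0, 0x5000) evolves as:
 *
 *   after add_range():                  { [1, 5) }          (in PFNs)
 *   after the two subtract_range()s:    unchanged (nothing below addr
 *                                       or above limit here)
 *   after subtracting the reservation:  { [1, 2), [3, 5) }
 *   after clean_sort_range():           nr_range = 2
 *
 * free_size = (2 - 1) + (5 - 3) = 3 pages, returned as 3 << PAGE_SHIFT.
 */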

u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
{
        return __memblock_x86_memory_in_range(addr, limit, true);
}

u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
{
        return __memblock_x86_memory_in_range(addr, limit, false);
}
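
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a boot-time caller might use the two wrappers above to report
 * memory below 4 GiB. The function name and the printout are
 * hypothetical; the wrappers are the real interface.
 */
static void __init example_report_low_memory(void)
{
        u64 four_g = 1ULL << 32;

        pr_info("RAM  below 4G: %llu bytes\n",
                memblock_x86_memory_in_range(0, four_g));
        pr_info("free below 4G: %llu bytes\n",
                memblock_x86_free_memory_in_range(0, four_g));
}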

void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
{
        if (start == end)
                return;

        if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n",
                      start, end))
                return;

        memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n",
                     start, end - 1, name);

        memblock_reserve(start, end - start);
}

void __init memblock_x86_free_range(u64 start, u64 end)
{
        if (start == end)
                return;

        if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n",
                      start, end))
                return;

        memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n",
                     start, end - 1);

        memblock_free(start, end - start);
}
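
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * reserving and later releasing a boot-time scratch page with the
 * wrappers above. The address and the "EXAMPLE" label are hypothetical.
 * Note that both wrappers take an *exclusive* end address, while the
 * underlying memblock_reserve() and memblock_free() take a size.
 */
static void __init example_scratch_page(void)
{
        u64 start = 0x100000;           /* 1 MiB, hypothetical address */
        u64 end = start + PAGE_SIZE;    /* exclusive end */

        memblock_x86_reserve_range(start, end, "EXAMPLE");
        /* ... use the page at __va(start) ... */
        memblock_x86_free_range(start, end);
}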

/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
 */
static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
                                                  unsigned long start_pfn,
                                                  unsigned long last_pfn,
                                                  unsigned long *ei_startpfn,
                                                  unsigned long *ei_endpfn)
{
        u64 align = PAGE_SIZE;

        *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
        *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;

        /* Skip map entries smaller than a page */
        if (*ei_startpfn >= *ei_endpfn)
                return 0;

        /* Skip if map is outside the node */
        if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
                return 0;

        /* Check for overlaps */
        if (*ei_startpfn < start_pfn)
                *ei_startpfn = start_pfn;
        if (*ei_endpfn > last_pfn)
                *ei_endpfn = last_pfn;

        return 1;
}
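
/*
 * Worked example (editor's illustration, hypothetical numbers): for a
 * region with base 0x1800 and size 0x2800 (so it spans [0x1800, 0x4000))
 * and PAGE_SIZE == 0x1000, the rounding above yields PFNs [2, 4).
 * Queried against a node covering PFNs [3, 8), the overlap clipping
 * returns 1 with *ei_startpfn == 3 and *ei_endpfn == 4.
 */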

/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 */
u64 __init memblock_x86_hole_size(u64 start, u64 end)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long last_pfn = end >> PAGE_SHIFT;
        unsigned long ei_startpfn, ei_endpfn, ram = 0;
        struct memblock_region *r;

        for_each_memblock(memory, r)
                if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
                                                    &ei_startpfn, &ei_endpfn))
                        ram += ei_endpfn - ei_startpfn;

        return end - start - ((u64)ram << PAGE_SHIFT);
}
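
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * detecting whether a candidate range contains a hole, in the spirit of
 * the NUMA setup code that consumes this helper. The function name and
 * the threshold logic are hypothetical.
 */
static bool __init example_range_has_hole(u64 start, u64 end)
{
        /* Any byte not covered by a memblock memory region counts. */
        return memblock_x86_hole_size(start, end) > 0;
}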