memblock.c

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>

/* Check for already reserved areas */
static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
{
	struct memblock_region *r;
	u64 addr = *addrp, last;
	u64 size = *sizep;
	bool changed = false;

again:
	last = addr + size;
	for_each_memblock(reserved, r) {
		/* Reserved region overlaps the tail: clip the size */
		if (last > r->base && addr < r->base) {
			size = r->base - addr;
			changed = true;
			goto again;
		}
		/* Reserved region overlaps the head: move addr past it */
		if (last > (r->base + r->size) && addr < (r->base + r->size)) {
			addr = round_up(r->base + r->size, align);
			size = last - addr;
			changed = true;
			goto again;
		}
		/*
		 * Candidate is completely covered by a reserved region:
		 * bump the size so the caller's "last > ei_last" check
		 * rejects this range.
		 */
		if (last <= (r->base + r->size) && addr >= r->base) {
			(*sizep)++;
			return false;
		}
	}
	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}
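
/*
 * Illustrative trace (not part of the original source): with one reserved
 * region [0x2000, 0x3000) and a 0x1000-aligned candidate of addr = 0x1000,
 * size = 0x4000, the first clause clips the tail to size = 0x1000, i.e.
 * [0x1000, 0x2000). Had the candidate instead started at 0x2800, the
 * second clause would round addr up to 0x3000 and shrink size to match,
 * yielding [0x3000, 0x5000).
 */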
static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last, u64 start,
			 u64 *sizep, u64 align)
{
	u64 addr, last;

	addr = round_up(ei_start, align);
	if (addr < start)
		addr = round_up(start, align);
	if (addr >= ei_last)
		goto out;

	/* Start with the largest possible size, then carve out reservations */
	*sizep = ei_last - addr;
	while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
		;
	last = addr + *sizep;
	if (last > ei_last)
		goto out;

	return addr;

out:
	return MEMBLOCK_ERROR;
}
/*
 * Find the next free range after start; the size found is returned in *sizep.
 */
u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
{
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		u64 ei_start = r->base;
		u64 ei_last = ei_start + r->size;
		u64 addr;

		addr = __memblock_x86_find_in_range_size(ei_start, ei_last, start,
					 sizep, align);

		if (addr != MEMBLOCK_ERROR)
			return addr;
	}

	return MEMBLOCK_ERROR;
}
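
/*
 * Usage sketch (hypothetical caller, not in this file): an early allocator
 * could walk free ranges with this helper, e.g.:
 *
 *	u64 size;
 *	u64 addr = memblock_x86_find_in_range_size(0x100000ULL, &size,
 *						   PAGE_SIZE);
 *	if (addr != MEMBLOCK_ERROR && size >= needed)
 *		memblock_x86_reserve_range(addr, addr + needed, "EXAMPLE");
 *
 * "needed" and the "EXAMPLE" tag are placeholders; MEMBLOCK_ERROR and
 * PAGE_SIZE come from this era's <linux/memblock.h> and <asm/page.h>.
 */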
#ifndef CONFIG_NO_BOOTMEM
void __init memblock_x86_to_bootmem(u64 start, u64 end)
{
	int count;
	u64 final_start, final_end;
	struct memblock_region *r;

	/* Take out region array itself */
	memblock_free_reserved_regions();

	count = memblock.reserved.cnt;
	pr_info("(%d early reservations) ==> bootmem [%010llx-%010llx]\n",
		 count, start, end - 1);
	for_each_memblock(reserved, r) {
		pr_info("  [%010llx-%010llx] ", (u64)r->base,
			 (u64)r->base + r->size - 1);
		/* Clip the reservation to the [start, end) window */
		final_start = max(start, r->base);
		final_end = min(end, r->base + r->size);
		if (final_start >= final_end) {
			pr_cont("\n");
			continue;
		}
		pr_cont(" ==> [%010llx-%010llx]\n", final_start, final_end - 1);
		reserve_bootmem_generic(final_start, final_end - final_start,
					 BOOTMEM_DEFAULT);
	}

	/* Put region array back? */
	memblock_reserve_reserved_regions();
}
#endif
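
/*
 * Worked example (illustrative, numbers invented): with start = 0 and
 * end = 0x1000000, a reserved region [0x9f000, 0x100000) lies entirely
 * inside the window, so it is handed unchanged to
 * reserve_bootmem_generic(); a region at or above `end` would give
 * final_start >= final_end and be skipped with just the pr_cont("\n").
 */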
void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx]\n", start, end))
		return;

	memblock_reserve(start, end - start);
}
void __init memblock_x86_free_range(u64 start, u64 end)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx]\n", start, end))
		return;

	memblock_free(start, end - start);
}
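
/*
 * Example pairing (illustrative only, names are placeholders): a temporary
 * early-boot allocation would typically be bracketed like this:
 *
 *	memblock_x86_reserve_range(base, base + len, "TMP BUFFER");
 *	... use the buffer during early boot ...
 *	memblock_x86_free_range(base, base + len);
 *
 * Note both helpers take a [start, end) pair rather than (start, size),
 * and a zero-length range is silently ignored.
 */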