|
@@ -20,6 +20,8 @@
|
|
|
#include <linux/seq_file.h>
|
|
|
#include <linux/memblock.h>
|
|
|
|
|
|
+#include <asm-generic/sections.h>
|
|
|
+
|
|
|
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
|
|
|
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
|
|
|
|
|
@@ -32,6 +34,7 @@ struct memblock memblock __initdata_memblock = {
|
|
|
.reserved.cnt = 1, /* empty dummy entry */
|
|
|
.reserved.max = INIT_MEMBLOCK_REGIONS,
|
|
|
|
|
|
+ .bottom_up = false,
|
|
|
.current_limit = MEMBLOCK_ALLOC_ANYWHERE,
|
|
|
};
|
|
|
|
|
@@ -82,6 +85,38 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
|
|
|
return (i < type->cnt) ? i : -1;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * __memblock_find_range_bottom_up - find free area utility in bottom-up
|
|
|
+ * @start: start of candidate range
|
|
|
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
|
|
|
+ * @size: size of free area to find
|
|
|
+ * @align: alignment of free area to find
|
|
|
+ * @nid: nid of the free area to find, %MAX_NUMNODES for any node
|
|
|
+ *
|
|
|
+ * Utility called from memblock_find_in_range_node(), find free area bottom-up.
|
|
|
+ *
|
|
|
+ * RETURNS:
|
|
|
+ * Found address on success, 0 on failure.
|
|
|
+ */
|
|
|
+static phys_addr_t __init_memblock
|
|
|
+__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
|
|
|
+ phys_addr_t size, phys_addr_t align, int nid)
|
|
|
+{
|
|
|
+ phys_addr_t this_start, this_end, cand;
|
|
|
+ u64 i;
|
|
|
+
|
|
|
+ for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
|
|
|
+ this_start = clamp(this_start, start, end);
|
|
|
+ this_end = clamp(this_end, start, end);
|
|
|
+
|
|
|
+ cand = round_up(this_start, align);
|
|
|
+ if (cand < this_end && this_end - cand >= size)
|
|
|
+ return cand;
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* __memblock_find_range_top_down - find free area utility, in top-down
|
|
|
* @start: start of candidate range
|
|
@@ -93,7 +128,7 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
|
|
|
* Utility called from memblock_find_in_range_node(), find free area top-down.
|
|
|
*
|
|
|
* RETURNS:
|
|
|
- * Found address on success, %0 on failure.
|
|
|
+ * Found address on success, 0 on failure.
|
|
|
*/
|
|
|
static phys_addr_t __init_memblock
|
|
|
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
|
|
@@ -127,13 +162,24 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
|
|
|
*
|
|
|
* Find @size free area aligned to @align in the specified range and node.
|
|
|
*
|
|
|
+ * When allocation direction is bottom-up, the @start should be greater
|
|
|
+ * than the end of the kernel image. Otherwise, it will be trimmed. The
|
|
|
+ * reason is that we want the bottom-up allocation just near the kernel
|
|
|
+ * image so it is highly likely that the allocated memory and the kernel
|
|
|
+ * will reside in the same node.
|
|
|
+ *
|
|
|
+ * If bottom-up allocation failed, will try to allocate memory top-down.
|
|
|
+ *
|
|
|
* RETURNS:
|
|
|
- * Found address on success, %0 on failure.
|
|
|
+ * Found address on success, 0 on failure.
|
|
|
*/
|
|
|
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
|
|
|
phys_addr_t end, phys_addr_t size,
|
|
|
phys_addr_t align, int nid)
|
|
|
{
|
|
|
+	phys_addr_t ret;
|
|
|
+ phys_addr_t kernel_end;
|
|
|
+
|
|
|
/* pump up @end */
|
|
|
if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
|
|
|
end = memblock.current_limit;
|
|
@@ -141,6 +187,37 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
|
|
|
/* avoid allocating the first page */
|
|
|
start = max_t(phys_addr_t, start, PAGE_SIZE);
|
|
|
end = max(start, end);
|
|
|
+ kernel_end = __pa_symbol(_end);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * try bottom-up allocation only when bottom-up mode
|
|
|
+ * is set and @end is above the kernel image.
|
|
|
+ */
|
|
|
+ if (memblock_bottom_up() && end > kernel_end) {
|
|
|
+ phys_addr_t bottom_up_start;
|
|
|
+
|
|
|
+ /* make sure we will allocate above the kernel */
|
|
|
+ bottom_up_start = max(start, kernel_end);
|
|
|
+
|
|
|
+ /* ok, try bottom-up allocation first */
|
|
|
+ ret = __memblock_find_range_bottom_up(bottom_up_start, end,
|
|
|
+ size, align, nid);
|
|
|
+ if (ret)
|
|
|
+ return ret;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * we always limit bottom-up allocation above the kernel,
|
|
|
+ * but top-down allocation doesn't have the limit, so
|
|
|
+ * retrying top-down allocation may succeed when bottom-up
|
|
|
+ * allocation failed.
|
|
|
+ *
|
|
|
+	 * bottom-up allocation is expected to fail very rarely,
|
|
|
+ * so we use WARN_ONCE() here to see the stack trace if
|
|
|
+	 * that happens.
|
|
|
+ */
|
|
|
+ WARN_ONCE(1, "memblock: bottom-up allocation failed, "
|
|
|
+ "memory hotunplug may be affected\n");
|
|
|
+ }
|
|
|
|
|
|
return __memblock_find_range_top_down(start, end, size, align, nid);
|
|
|
}
|
|
@@ -155,7 +232,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
|
|
|
* Find @size free area aligned to @align in the specified range.
|
|
|
*
|
|
|
* RETURNS:
|
|
|
- * Found address on success, %0 on failure.
|
|
|
+ * Found address on success, 0 on failure.
|
|
|
*/
|
|
|
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
|
|
|
phys_addr_t end, phys_addr_t size,
|