|
@@ -92,9 +92,13 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
|
|
|
*
|
|
|
* Find @size free area aligned to @align in the specified range and node.
|
|
|
*
|
|
|
+ * If we have CONFIG_HAVE_MEMBLOCK_NODE_MAP defined, we need to check if the
|
|
|
+ * memory we found is not in hotpluggable ranges.
|
|
|
+ *
|
|
|
* RETURNS:
|
|
|
* Found address on success, %0 on failure.
|
|
|
*/
|
|
|
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
|
|
|
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
|
|
|
phys_addr_t end, phys_addr_t size,
|
|
|
phys_addr_t align, int nid)
|
|
@@ -139,6 +143,36 @@ restart:
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
|
+#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
|
|
|
+phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
|
|
|
+ phys_addr_t end, phys_addr_t size,
|
|
|
+ phys_addr_t align, int nid)
|
|
|
+{
|
|
|
+ phys_addr_t this_start, this_end, cand;
|
|
|
+ u64 i;
|
|
|
+
|
|
|
+ /* pump up @end */
|
|
|
+ if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
|
|
|
+ end = memblock.current_limit;
|
|
|
+
|
|
|
+ /* avoid allocating the first page */
|
|
|
+ start = max_t(phys_addr_t, start, PAGE_SIZE);
|
|
|
+ end = max(start, end);
|
|
|
+
|
|
|
+ for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
|
|
|
+ this_start = clamp(this_start, start, end);
|
|
|
+ this_end = clamp(this_end, start, end);
|
|
|
+
|
|
|
+ if (this_end < size)
|
|
|
+ continue;
|
|
|
+
|
|
|
+ cand = round_down(this_end - size, align);
|
|
|
+ if (cand >= this_start)
|
|
|
+ return cand;
|
|
|
+ }
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
|
|
|
|
|
|
/**
|
|
|
* memblock_find_in_range - find free area in given range
|