@@ -3780,34 +3780,6 @@ int __init add_from_early_node_map(struct range *range, int az,
 	return nr_range;
 }
 
-#ifdef CONFIG_NO_BOOTMEM
-void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
-					u64 goal, u64 limit)
-{
-	void *ptr;
-	u64 addr;
-
-	if (limit > memblock.current_limit)
-		limit = memblock.current_limit;
-
-	addr = find_memory_core_early(nid, size, align, goal, limit);
-
-	if (addr == MEMBLOCK_ERROR)
-		return NULL;
-
-	ptr = phys_to_virt(addr);
-	memset(ptr, 0, size);
-	memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
-	/*
-	 * The min_count is set to 0 so that bootmem allocated blocks
-	 * are never reported as leaks.
-	 */
-	kmemleak_alloc(ptr, size, 0, 0);
-	return ptr;
-}
-#endif
-
-
 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
 {
 	int i;