@@ -360,6 +360,19 @@ static u64 __init get_mem_size(unsigned long limit_pfn)
 	return mapped_pages << PAGE_SHIFT;
 }
 
+static void __init early_reserve_initrd(void)
+{
+	/* Assume only end is not page aligned */
+	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
+	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
+	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
+
+	if (!boot_params.hdr.type_of_loader ||
+	    !ramdisk_image || !ramdisk_size)
+		return;		/* No initrd provided by bootloader */
+
+	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
+}
 static void __init reserve_initrd(void)
 {
 	/* Assume only end is not page aligned */
@@ -386,10 +399,6 @@ static void __init reserve_initrd(void)
 	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
 				PFN_DOWN(ramdisk_end))) {
 		/* All are mapped, easy case */
-		/*
-		 * don't need to reserve again, already reserved early
-		 * in i386_start_kernel
-		 */
 		initrd_start = ramdisk_image + PAGE_OFFSET;
 		initrd_end = initrd_start + ramdisk_size;
 		return;
@@ -400,6 +409,9 @@ static void __init reserve_initrd(void)
 	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
 }
 #else
+static void __init early_reserve_initrd(void)
+{
+}
 static void __init reserve_initrd(void)
 {
 }
@@ -760,6 +772,8 @@ early_param("reservelow", parse_reservelow);
 
 void __init setup_arch(char **cmdline_p)
 {
+	early_reserve_initrd();
+
 #ifdef CONFIG_X86_32
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 	visws_early_detect();