init.c

#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;
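/*
 * Reserve a page-aligned copy of the real-mode blob below 1 MiB and
 * patch its relocations in place so that APs can be booted through it.
 */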
void __init setup_real_mode(void)
{
        phys_addr_t mem;
        u16 real_mode_seg;
        u32 *rel;
        u32 count;
        u32 *ptr;
        u16 *seg;
        int i;
        unsigned char *base;
        struct trampoline_header *trampoline_header;
        size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
        u64 *trampoline_pgd;
        u64 efer;
#endif
        /* Has to be in very low memory so we can execute real-mode AP code. */
        mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
        if (!mem)
                panic("Cannot allocate trampoline\n");

        base = __va(mem);
        memblock_reserve(mem, size);
        real_mode_header = (struct real_mode_header *) base;
        printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
               base, (unsigned long long)mem, size);

        memcpy(base, real_mode_blob, size);
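        /* Real-mode segment values address memory in 16-byte paragraphs. */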
        real_mode_seg = __pa(base) >> 4;
        rel = (u32 *) real_mode_relocs;
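        /*
         * The relocation table is two runs of entries, each a count word
         * followed by that many offsets into the blob.
         */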
        /* 16-bit segment relocations. */
        count = rel[0];
        rel = &rel[1];
        for (i = 0; i < count; i++) {
                seg = (u16 *) (base + rel[i]);
                *seg = real_mode_seg;
        }
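        /* After the loop, i == count, so rel[i] is the next table's count. */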
        /* 32-bit linear relocations. */
        count = rel[i];
        rel = &rel[i + 1];
        for (i = 0; i < count; i++) {
                ptr = (u32 *) (base + rel[i]);
                *ptr += __pa(base);
        }
        /* Must be performed *after* relocation. */
        trampoline_header = (struct trampoline_header *)
                __va(real_mode_header->trampoline_header);
#ifdef CONFIG_X86_32
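        /* 32-bit APs enter at startup_32_smp through a GDT copied from boot_gdt. */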
        trampoline_header->start = __pa(startup_32_smp);
        trampoline_header->gdt_limit = __BOOT_DS + 7;
        trampoline_header->gdt_base = __pa(boot_gdt);
#else
        /*
         * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
         * so we need to mask it out.
         */
        rdmsrl(MSR_EFER, efer);
        trampoline_header->efer = efer & ~EFER_LMA;

        trampoline_header->start = (u64) secondary_startup_64;
        trampoline_cr4_features = &trampoline_header->cr4;
        *trampoline_cr4_features = read_cr4();
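        /*
         * Slot 0 identity-maps low memory for the switch to paging; the
         * top slot mirrors the kernel's own high mapping.
         */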
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
        trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
        trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
#endif
}
/*
 * setup_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Thus, we use an arch_initcall instead.
 */
static int __init set_real_mode_permissions(void)
{
        unsigned char *base = (unsigned char *) real_mode_header;
        size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

        size_t ro_size =
                PAGE_ALIGN(real_mode_header->ro_end) -
                __pa(base);

        size_t text_size =
                PAGE_ALIGN(real_mode_header->ro_end) -
                real_mode_header->text_start;

        unsigned long text_start =
                (unsigned long) __va(real_mode_header->text_start);
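        /*
         * The whole blob is made non-executable, everything up to ro_end
         * read-only, and only the text region flipped back to executable.
         */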
        set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
        set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
        set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);

        return 0;
}
arch_initcall(set_real_mode_permissions);