efi_64.c 3.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134
  1. /*
  2. * x86_64 specific EFI support functions
  3. * Based on Extensible Firmware Interface Specification version 1.0
  4. *
  5. * Copyright (C) 2005-2008 Intel Co.
  6. * Fenghua Yu <fenghua.yu@intel.com>
  7. * Bibo Mao <bibo.mao@intel.com>
  8. * Chandramouli Narayanan <mouli@linux.intel.com>
  9. * Huang Ying <ying.huang@intel.com>
  10. *
  11. * Code to convert EFI to E820 map has been implemented in elilo bootloader
 * based on an EFI patch by Edgar Hucek. Based on the E820 map, the page table
  13. * is setup appropriately for EFI runtime code.
  14. * - mouli 06/14/2007.
  15. *
  16. */
  17. #include <linux/kernel.h>
  18. #include <linux/init.h>
  19. #include <linux/mm.h>
  20. #include <linux/types.h>
  21. #include <linux/spinlock.h>
  22. #include <linux/bootmem.h>
  23. #include <linux/ioport.h>
  24. #include <linux/module.h>
  25. #include <linux/efi.h>
  26. #include <linux/uaccess.h>
  27. #include <linux/io.h>
  28. #include <linux/reboot.h>
  29. #include <asm/setup.h>
  30. #include <asm/page.h>
  31. #include <asm/e820.h>
  32. #include <asm/pgtable.h>
  33. #include <asm/tlbflush.h>
  34. #include <asm/proto.h>
  35. #include <asm/efi.h>
/* PGD entry for virtual address 0, saved by efi_call_phys_prelog() and
 * restored by efi_call_phys_epilog(). */
static pgd_t save_pgd __initdata;
/* IRQ flags saved across the physical-mode EFI call window. */
static unsigned long efi_flags __initdata;
/*
 * Set or clear the NX bit on every page-table entry covering the
 * physical range [start, end) through the kernel direct mapping.
 *
 * @start:      physical start address
 * @end:        physical end address (exclusive)
 * @executable: non-zero clears NX (make executable), zero sets NX
 */
static void __init early_mapping_set_exec(unsigned long start,
					  unsigned long end,
					  int executable)
{
	pte_t *kpte;
	int level;

	while (start < end) {
		/* Look up the pte mapping this address via the direct map;
		 * a missing mapping here is a fatal inconsistency. */
		kpte = lookup_address((unsigned long)__va(start), &level);
		BUG_ON(!kpte);
		if (executable)
			set_pte(kpte, pte_mkexec(*kpte));
		else
			/* Mask with __supported_pte_mask so NX is only set
			 * when the CPU actually supports it. */
			set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
					    __supported_pte_mask));
		/* Level 4 is treated as a PMD-sized (large-page) mapping,
		 * so advance by PMD_SIZE; otherwise by one 4K page. */
		if (level == 4)
			start = (start + PMD_SIZE) & PMD_MASK;
		else
			start = (start + PAGE_SIZE) & PAGE_MASK;
	}
}
  58. static void __init early_runtime_code_mapping_set_exec(int executable)
  59. {
  60. efi_memory_desc_t *md;
  61. void *p;
  62. if (!(__supported_pte_mask & _PAGE_NX))
  63. return;
  64. /* Make EFI runtime service code area executable */
  65. for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
  66. md = p;
  67. if (md->type == EFI_RUNTIME_SERVICES_CODE) {
  68. unsigned long end;
  69. end = md->phys_addr + (md->num_pages << PAGE_SHIFT);
  70. early_mapping_set_exec(md->phys_addr, end, executable);
  71. }
  72. }
  73. }
/*
 * Prepare for calling EFI services in physical mode: disable interrupts,
 * make the EFI runtime code executable, and alias the kernel direct
 * mapping at virtual address 0 so low physical addresses remain valid
 * while paging stays enabled during the call.
 */
void __init efi_call_phys_prelog(void)
{
	unsigned long vaddress;

	local_irq_save(efi_flags);
	early_runtime_code_mapping_set_exec(1);
	/* Save the original PGD entry for slot 0 first; it is restored by
	 * efi_call_phys_epilog(). Then mirror the direct-map entry into
	 * slot 0 so physical address 0 onward is identity-mapped. */
	vaddress = (unsigned long)__va(0x0UL);
	save_pgd = *pgd_offset_k(0x0UL);
	set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
	__flush_tlb_all();
}
/*
 * Undo efi_call_phys_prelog(): restore the saved PGD entry for virtual
 * address 0, mark the EFI runtime code non-executable again, flush the
 * TLB, and re-enable interrupts.
 */
void __init efi_call_phys_epilog(void)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	set_pgd(pgd_offset_k(0x0UL), save_pgd);
	early_runtime_code_mapping_set_exec(0);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
}
  94. void __init efi_reserve_bootmem(void)
  95. {
  96. reserve_bootmem_generic((unsigned long)memmap.phys_map,
  97. memmap.nr_map * memmap.desc_size);
  98. }
  99. void __iomem * __init efi_ioremap(unsigned long offset,
  100. unsigned long size)
  101. {
  102. static unsigned pages_mapped;
  103. unsigned long last_addr;
  104. unsigned i, pages;
  105. last_addr = offset + size - 1;
  106. offset &= PAGE_MASK;
  107. pages = (PAGE_ALIGN(last_addr) - offset) >> PAGE_SHIFT;
  108. if (pages_mapped + pages > MAX_EFI_IO_PAGES)
  109. return NULL;
  110. for (i = 0; i < pages; i++) {
  111. __set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
  112. offset, PAGE_KERNEL_EXEC_NOCACHE);
  113. offset += PAGE_SIZE;
  114. pages_mapped++;
  115. }
  116. return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
  117. (pages_mapped - pages));
  118. }