/*
 * linux/arch/arm/mm/idmap.c (2.8 KB)
 *
 * Static 1:1 (identity) mapping setup for the ARM MMU.
 */
  1. #include <linux/kernel.h>
  2. #include <asm/cputype.h>
  3. #include <asm/idmap.h>
  4. #include <asm/pgalloc.h>
  5. #include <asm/pgtable.h>
  6. #include <asm/sections.h>
  7. pgd_t *idmap_pgd;
  8. static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
  9. unsigned long prot)
  10. {
  11. pmd_t *pmd = pmd_offset(pud, addr);
  12. addr = (addr & PMD_MASK) | prot;
  13. pmd[0] = __pmd(addr);
  14. addr += SECTION_SIZE;
  15. pmd[1] = __pmd(addr);
  16. flush_pmd_entry(pmd);
  17. }
  18. static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
  19. unsigned long prot)
  20. {
  21. pud_t *pud = pud_offset(pgd, addr);
  22. unsigned long next;
  23. do {
  24. next = pud_addr_end(addr, end);
  25. idmap_add_pmd(pud, addr, next, prot);
  26. } while (pud++, addr = next, addr != end);
  27. }
  28. void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
  29. {
  30. unsigned long prot, next;
  31. prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
  32. if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
  33. prot |= PMD_BIT4;
  34. pgd += pgd_index(addr);
  35. do {
  36. next = pgd_addr_end(addr, end);
  37. idmap_add_pud(pgd, addr, next, prot);
  38. } while (pgd++, addr = next, addr != end);
  39. }
  40. #ifdef CONFIG_SMP
  41. static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
  42. {
  43. pmd_t *pmd = pmd_offset(pud, addr);
  44. pmd_clear(pmd);
  45. }
  46. static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
  47. {
  48. pud_t *pud = pud_offset(pgd, addr);
  49. unsigned long next;
  50. do {
  51. next = pud_addr_end(addr, end);
  52. idmap_del_pmd(pud, addr, next);
  53. } while (pud++, addr = next, addr != end);
  54. }
  55. void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
  56. {
  57. unsigned long next;
  58. pgd += pgd_index(addr);
  59. do {
  60. next = pgd_addr_end(addr, end);
  61. idmap_del_pud(pgd, addr, next);
  62. } while (pgd++, addr = next, addr != end);
  63. }
  64. #endif
  65. extern char __idmap_text_start[], __idmap_text_end[];
  66. static int __init init_static_idmap(void)
  67. {
  68. phys_addr_t idmap_start, idmap_end;
  69. idmap_pgd = pgd_alloc(&init_mm);
  70. if (!idmap_pgd)
  71. return -ENOMEM;
  72. /* Add an identity mapping for the physical address of the section. */
  73. idmap_start = virt_to_phys((void *)__idmap_text_start);
  74. idmap_end = virt_to_phys((void *)__idmap_text_end);
  75. pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
  76. (long long)idmap_start, (long long)idmap_end);
  77. identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
  78. return 0;
  79. }
  80. arch_initcall(init_static_idmap);
  81. /*
  82. * In order to soft-boot, we need to insert a 1:1 mapping in place of
  83. * the user-mode pages. This will then ensure that we have predictable
  84. * results when turning the mmu off
  85. */
  86. void setup_mm_for_reboot(void)
  87. {
  88. /*
  89. * We need to access to user-mode page tables here. For kernel threads
  90. * we don't have any user-mode mappings so we use the context that we
  91. * "borrowed".
  92. */
  93. identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
  94. local_flush_tlb_all();
  95. }