mmu.c

/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/mach/map.h>

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
        pte_val(*pte) = new_pte;
        /*
         * flush_pmd_entry just takes a void pointer and cleans the necessary
         * cache entries, so we can reuse the function for ptes.
         */
        flush_pmd_entry(pte);
}

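/*
 * Free all level-3 (pte) tables referenced by a level-2 (pmd) table.
 * addr stays pmd-aligned on each iteration, so pte_offset_kernel()
 * resolves to the base of the pte table that pte_free_kernel() frees.
 */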
static void free_ptes(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
                if (!pmd_none(*pmd) && pmd_table(*pmd)) {
                        pte = pte_offset_kernel(pmd, addr);
                        pte_free_kernel(NULL, pte);
                }
                pmd++;
        }
}

/**
 * free_hyp_pmds - free the Hyp-mode level-2 tables and child level-3 tables
 *
 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
 * only mappings in the kernel memory area, which is above PAGE_OFFSET.
 */
void free_hyp_pmds(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr;

        mutex_lock(&kvm_hyp_pgd_mutex);
        /* Walk the whole kernel VA space; addr wraps to 0 past the top. */
        for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
                pgd = hyp_pgd + pgd_index(addr);
                pud = pud_offset(pgd, addr);

                if (pud_none(*pud))
                        continue;
                BUG_ON(pud_bad(*pud));

                pmd = pmd_offset(pud, addr);
                free_ptes(pmd, addr);
                pmd_free(NULL, pmd);
                pud_clear(pud);
        }
        mutex_unlock(&kvm_hyp_pgd_mutex);
}

static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
                                    unsigned long end)
{
        pte_t *pte;
        unsigned long addr;
        struct page *page;

        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                pte = pte_offset_kernel(pmd, addr);
                BUG_ON(!virt_addr_valid(addr));
                page = virt_to_page(addr);
                kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
        }
}

static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
                                       unsigned long end,
                                       unsigned long *pfn_base)
{
        pte_t *pte;
        unsigned long addr;

        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                pte = pte_offset_kernel(pmd, addr);
                BUG_ON(pfn_valid(*pfn_base));
                kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
                (*pfn_base)++;
        }
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
                                   unsigned long end, unsigned long *pfn_base)
{
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, next;

        for (addr = start; addr < end; addr = next) {
                pmd = pmd_offset(pud, addr);

                BUG_ON(pmd_sect(*pmd));

                if (pmd_none(*pmd)) {
                        pte = pte_alloc_one_kernel(NULL, addr);
                        if (!pte) {
                                kvm_err("Cannot allocate Hyp pte\n");
                                return -ENOMEM;
                        }
                        pmd_populate_kernel(NULL, pmd, pte);
                }

                next = pmd_addr_end(addr, end);

                /*
                 * If pfn_base is NULL, we map kernel pages into HYP with the
                 * virtual address. Otherwise, this is considered an I/O
                 * mapping and we map the physical region starting at
                 * *pfn_base to [start, end[.
                 */
                if (!pfn_base)
                        create_hyp_pte_mappings(pmd, addr, next);
                else
                        create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
        }

        return 0;
}

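/*
 * Walk the Hyp pgd covering [from, to[, descending pgd -> pud -> pmd and
 * allocating intermediate tables on demand; the pte-level helpers above
 * write the final entries. A NULL pfn_base maps kernel pages at their
 * kernel virtual address, while a non-NULL pfn_base creates an I/O
 * mapping starting at *pfn_base.
 */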
static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
{
        unsigned long start = (unsigned long)from;
        unsigned long end = (unsigned long)to;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr, next;
        int err = 0;

        BUG_ON(start > end);
        if (start < PAGE_OFFSET)
                return -EINVAL;

        mutex_lock(&kvm_hyp_pgd_mutex);
        for (addr = start; addr < end; addr = next) {
                pgd = hyp_pgd + pgd_index(addr);
                pud = pud_offset(pgd, addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        pud_populate(NULL, pud, pmd);
                }

                next = pgd_addr_end(addr, end);
                err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
                if (err)
                        goto out;
        }
out:
        mutex_unlock(&kvm_hyp_pgd_mutex);
        return err;
}

/**
 * create_hyp_mappings - map a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used in
 * Hyp-mode mapping to the same underlying physical pages.
 *
 * Note: Wrapping around zero in the "to" address is not supported.
 */
int create_hyp_mappings(void *from, void *to)
{
        return __create_hyp_mappings(from, to, NULL);
}

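/*
 * Hypothetical usage sketch (not part of this file): arch init code could
 * map its Hyp-mode text into the Hyp page tables like this. The section
 * markers __kvm_hyp_code_start/__kvm_hyp_code_end are assumed names here,
 * following the __hyp_idmap_text_* pattern used above.
 */
#if 0
static int example_map_hyp_text(void)
{
        extern char __kvm_hyp_code_start[], __kvm_hyp_code_end[];

        /* Map [start, end) at the same virtual address in Hyp mode. */
        return create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
}
#endif
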
/**
 * create_hyp_io_mappings - map a physical IO range in Hyp mode
 * @from:	The virtual HYP start address of the range
 * @to:		The virtual HYP end address of the range (exclusive)
 * @addr:	The physical start address which gets mapped
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
{
        unsigned long pfn = __phys_to_pfn(addr);
        return __create_hyp_mappings(from, to, &pfn);
}

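/*
 * Hypothetical usage sketch (not part of this file): mapping a device MMIO
 * region, e.g. an interrupt controller's control interface, into a Hyp
 * virtual window. Both window addresses and the physical base are made-up
 * placeholders supplied by the caller.
 */
#if 0
static int example_map_device(void *hyp_va_start, void *hyp_va_end,
                              phys_addr_t mmio_phys_base)
{
        /* Maps [hyp_va_start, hyp_va_end) to the pages at mmio_phys_base. */
        return create_hyp_io_mappings(hyp_va_start, hyp_va_end,
                                      mmio_phys_base);
}
#endif
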
/* Guest memory aborts are not handled yet; report an error to the caller. */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        return -EINVAL;
}

/* HTTBR (the Hyp Translation Table Base Register) takes the physical
 * address of the Hyp pgd. */
phys_addr_t kvm_mmu_get_httbr(void)
{
        VM_BUG_ON(!virt_addr_valid(hyp_pgd));
        return virt_to_phys(hyp_pgd);
}

/* hyp_pgd comes from the idmap setup code; all we check is that it exists. */
int kvm_mmu_init(void)
{
        return hyp_pgd ? 0 : -ENOMEM;
}

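/*
 * Hypothetical call-order sketch (not part of this file): how init code
 * might tie the pieces above together. The caller and the register write
 * are placeholders; only the three functions invoked are from this file.
 */
#if 0
static int example_hyp_init(void)
{
        phys_addr_t httbr;
        int err;

        err = kvm_mmu_init();           /* check that hyp_pgd was allocated */
        if (err)
                return err;

        /* ... create_hyp_mappings()/create_hyp_io_mappings() as needed ... */

        httbr = kvm_mmu_get_httbr();    /* value to program into HTTBR */
        (void)httbr;
        return 0;
}
#endif
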
/**
 * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
 *
 * Free the underlying pmds for all pgds in range and clear the pgds (but
 * don't free them) afterwards.
 */
void kvm_clear_hyp_idmap(void)
{
        unsigned long addr, end;
        unsigned long next;
        pgd_t *pgd = hyp_pgd;
        pud_t *pud;
        pmd_t *pmd;

        /* The idmap maps VA == PA, so index the pgd by physical address. */
        addr = virt_to_phys(__hyp_idmap_text_start);
        end = virt_to_phys(__hyp_idmap_text_end);

        pgd += pgd_index(addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pud = pud_offset(pgd, addr);
                pmd = pmd_offset(pud, addr);

                pud_clear(pud);
                clean_pmd_entry(pmd);
                pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
        } while (pgd++, addr = next, addr < end);
}