book3s_mmu_hpte.c

/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#define PTE_SIZE 12

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

static struct kmem_cache *hpte_cache;
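
/*
 * Each shadow PTE is kept on three hash lists at once, so that it can
 * be looked up by guest effective address, by guest virtual page
 * number, or by the upper bits of the virtual page number when large
 * ranges have to be flushed.
 */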
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}
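
/* Link a new shadow PTE into all three lookup hash lists. */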
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head(&pte->list_vpte_long,
		       &vcpu->arch.hpte_hash_vpte_long[index]);
}
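
/*
 * Tear down a single shadow PTE: invalidate the host mapping, release
 * the backing page (dirty if the guest was allowed to write to it),
 * unlink the entry from all three hash lists and free it.
 */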
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);

	hlist_del(&pte->list_pte);
	hlist_del(&pte->list_vpte);
	hlist_del(&pte->list_vpte_long);

	vcpu->arch.hpte_cache_count--;
	kmem_cache_free(hpte_cache, pte);
}

static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;
	struct hlist_node *node, *tmp;
	int i;

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}
}

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node, *tmp;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_safe(pte, node, tmp, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);
}
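
/*
 * Flush shadow PTEs matching guest_ea under ea_mask.  Only three masks
 * occur: ~0xfffUL flushes a single 4k page, 0x0ffff000 flushes a 32-bit
 * address in each of the sixteen possible segments, and 0 drops every
 * cached entry.
 */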
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	u64 i;

	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
		    vcpu->arch.hpte_cache_count, guest_ea, ea_mask);

	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		/* 32-bit flush w/o segment, go through all possible segments */
		for (i = 0; i < 0x100000000ULL; i += 0x10000000ULL)
			kvmppc_mmu_pte_flush(vcpu, guest_ea | i, ~0xfffUL);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node, *tmp;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);
}

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node, *tmp;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu->arch.hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);
}
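
/*
 * Flush shadow PTEs by guest virtual page number.  Only the two masks
 * handled by the helpers above are valid; anything else is a caller
 * bug and hits the WARN_ON below.
 */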
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
		    vcpu->arch.hpte_cache_count, guest_vp, vp_mask);

	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}
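
/*
 * Flush shadow PTEs whose guest real address falls into
 * [pa_start, pa_end).  There is no hash keyed by real address, so
 * every vPTE_long list has to be walked.
 */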
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct hlist_node *node, *tmp;
	struct hpte_cache *pte;
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
		    vcpu->arch.hpte_cache_count, pa_start, pa_end);

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}
}
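
/*
 * Allocate the next shadow PTE.  Once the cache grows to
 * HPTEG_CACHE_NUM entries, all of them are flushed to make room.
 */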
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
	/* Don't count a failed allocation against the cache limit */
	if (!pte)
		return NULL;

	vcpu->arch.hpte_cache_count++;
	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	return pte;
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));

	return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);
	if (!hpte_cache)
		return -ENOMEM;

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}