book3s_64_mmu_host.c

/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace.h"

#define PTE_SIZE 12
#define VSID_ALL 0

/* #define DEBUG_SLB */

#ifdef DEBUG_SLB
#define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_slb(a, ...) do { } while(0)
#endif
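
/* Invalidate the host hash-table entry that shadows a cached guest PTE. */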
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        ppc_md.hpte_invalidate(pte->slot, pte->host_va,
                               MMU_PAGE_4K, MMU_SEGSIZE_256M,
                               false);
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return hash_64(gvsid, SID_MAP_BITS);
}
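
/* Look up an existing guest VSID -> host VSID mapping, checking both the
 * primary hash slot and its mirrored slot. Returns NULL if neither matches. */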
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (vcpu->arch.shared->msr & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->valid && (map->guest_vsid == gvsid)) {
                dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n",
                            gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->valid && (map->guest_vsid == gvsid)) {
                dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
                            gvsid, map->host_vsid);
                return map;
        }

        dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n",
                    sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid);
        return NULL;
}
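
/* Install a host HPTE for a guest mapping: resolve the guest real address to
 * a host pfn, pick the shadow VSID for the segment and insert the entry into
 * the host hash table, falling back to the secondary hash group on collision. */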
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
        pfn_t hpaddr;
        ulong hash, hpteg, va;
        u64 vsid;
        int ret;
        int rflags = 0x192;
        int vflags = 0;
        int attempt = 0;
        struct kvmppc_sid_map *map;

        /* Get host physical address for gpa */
        hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
        if (is_error_pfn(hpaddr)) {
                printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
                return -EINVAL;
        }
        hpaddr <<= PAGE_SHIFT;
        hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
                WARN_ON(ret < 0);
                map = find_sid_vsid(vcpu, vsid);
        }
        if (!map) {
                printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
                                vsid, orig_pte->eaddr);
                WARN_ON(true);
                return -EINVAL;
        }

        vsid = map->host_vsid;
        va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);

        if (!orig_pte->may_write)
                rflags |= HPTE_R_PP;
        else
                mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);

        if (!orig_pte->may_execute)
                rflags |= HPTE_R_N;

        hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);

map_again:
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

        /* In case we tried normal mapping already, let's nuke old entries */
        if (attempt > 1)
                if (ppc_md.hpte_remove(hpteg) < 0)
                        return -1;

        ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);

        if (ret < 0) {
                /* If we couldn't map a primary PTE, try a secondary */
                hash = ~hash;
                vflags ^= HPTE_V_SECONDARY;
                attempt++;
                goto map_again;
        } else {
                struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);

                trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte);

                /* The ppc_md code may give us a secondary entry even though we
                   asked for a primary. Fix up. */
                if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
                        hash = ~hash;
                        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
                }

                pte->slot = hpteg + (ret & 7);
                pte->host_va = va;
                pte->pte = *orig_pte;
                pte->pfn = hpaddr >> PAGE_SHIFT;

                kvmppc_mmu_hpte_cache_map(vcpu, pte);
        }

        return 0;
}
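
/* Allocate a hash slot for a new guest VSID -> shadow VSID mapping. When the
 * shadow VSID range is exhausted, all shadow PTEs and segments are flushed
 * and the allocator wraps back to vsid_first. */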
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (vcpu->arch.shared->msr & MSR_PR)
                gvsid |= VSID_PR;

        /* We might get collisions that trap in preceding order, so let's
           map them differently */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
                vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }
        map->host_vsid = vcpu_book3s->vsid_next++;

        map->guest_vsid = gvsid;
        map->valid = true;

        dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n",
                    sid_map_mask, gvsid, map->host_vsid);

        return map;
}
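
/* Pick a shadow SLB slot for this ESID: reuse a matching or previously
 * invalidated entry if one exists, otherwise take the next free slot,
 * flushing the shadow SLB first when it is full. Slot 0 is reserved. */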
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
        int i;
        int max_slb_size = 64;
        int found_inval = -1;
        int r;

        if (!to_svcpu(vcpu)->slb_max)
                to_svcpu(vcpu)->slb_max = 1;

        /* Are we overwriting? */
        for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
                if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
                        found_inval = i;
                else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
                        return i;
        }

        /* Found a spare entry that was invalidated before */
        if (found_inval > 0)
                return found_inval;

        /* No spare invalid entry, so create one */

        if (mmu_slb_size < 64)
                max_slb_size = mmu_slb_size;

        /* Overflowing -> purge */
        if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
                kvmppc_mmu_flush_segments(vcpu);

        r = to_svcpu(vcpu)->slb_max;
        to_svcpu(vcpu)->slb_max++;

        return r;
}
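
/* Build and install a shadow SLB entry for the guest effective address. */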
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        u64 esid = eaddr >> SID_SHIFT;
        u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
        u64 slb_vsid = SLB_VSID_USER;
        u64 gvsid;
        int slb_index;
        struct kvmppc_sid_map *map;

        slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                to_svcpu(vcpu)->slb[slb_index].esid = 0;
                return -ENOENT;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;

        slb_vsid |= (map->host_vsid << 12);
        slb_vsid &= ~SLB_VSID_KP;
        slb_esid |= slb_index;

        to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
        to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;

        dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);

        return 0;
}
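
/* Reset the shadow SLB, leaving only the reserved slot 0. */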
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        to_svcpu(vcpu)->slb_max = 1;
        to_svcpu(vcpu)->slb[0].esid = 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_hpte_destroy(vcpu);
        __destroy_context(to_book3s(vcpu)->context_id);
}
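
/* Allocate a host MMU context for this vcpu; its VSID range backs the
 * shadow segment mappings handed out by create_sid_map(). */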
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;

        err = __init_new_context();
        if (err < 0)
                return -1;
        vcpu3s->context_id = err;

        vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1;
        vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
        vcpu3s->vsid_next = vcpu3s->vsid_first;

        kvmppc_mmu_hpte_init(vcpu);

        return 0;
}