book3s_64_mmu_host.c

/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#define PTE_SIZE 12
#define VSID_ALL 0

/* #define DEBUG_MMU */
/* #define DEBUG_SLB */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

#ifdef DEBUG_SLB
#define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_slb(a, ...) do { } while (0)
#endif
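
/*
 * Tear down one shadow HPTE: invalidate it in the host hash table and
 * release the guest page it pinned, marking the page dirty if the
 * mapping was writable.
 */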
static void invalidate_pte(struct hpte_cache *pte)
{
	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

	ppc_md.hpte_invalidate(pte->slot, pte->host_va,
			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
			       false);
	pte->host_va = 0;

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);
}
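
/*
 * Invalidate all cached shadow PTEs whose guest effective address matches
 * guest_ea under ea_mask. An ea_mask of 0 means "flush everything" and
 * resets the cache.
 */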
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	guest_ea &= ea_mask;
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.eaddr & ea_mask) == guest_ea)
			invalidate_pte(pte);
	}

	/* Doing a complete flush -> start from scratch */
	if (!ea_mask)
		vcpu->arch.hpte_cache_offset = 0;
}
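
/*
 * Same as kvmppc_mmu_pte_flush(), but matches on the guest virtual page
 * number instead of the effective address.
 */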
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	guest_vp &= vp_mask;
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(pte);
	}
}
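
/*
 * Invalidate all cached shadow PTEs that map guest physical addresses in
 * the range [pa_start, pa_end).
 */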
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.raddr >= pa_start) &&
		    (pte->pte.raddr < pa_end))
			invalidate_pte(pte);
	}
}
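
/*
 * Return the next free slot in the shadow PTE cache, flushing the whole
 * cache first if it is full.
 */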
static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return vcpu->arch.hpte_cache_offset++;
}
/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
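
/*
 * Look up the host VSID for a guest VSID. Each gvsid hashes to a primary
 * slot and a mirrored secondary slot (SID_MAP_MASK - hash); returns NULL
 * if neither slot holds a valid mapping for it.
 */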
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n",
			    gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
			    gvsid, map->host_vsid);
		return map;
	}

	dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n",
		    sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid);
	return NULL;
}
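
/*
 * Resolve a guest PTE to a host hash PTE: pin the guest page, translate
 * the guest VSID to a shadow VSID, and insert the mapping into the host
 * hash table. The base rflags value 0x192 corresponds to
 * HPTE_R_R | HPTE_R_C | HPTE_R_M with PP=0b10; read-only mappings raise
 * PP to 0b11 and non-executable mappings add HPTE_R_N.
 */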
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	pfn_t hpaddr;
	ulong hash, hpteg, va;
	u64 vsid;
	int ret;
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;

	/* Get host physical address for gpa */
	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	if (is_error_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
		       orig_pte->raddr >> PAGE_SHIFT);
		return -EINVAL;
	}
	hpaddr <<= PAGE_SHIFT;
#if PAGE_SHIFT == 12
#elif PAGE_SHIFT == 16
	hpaddr |= orig_pte->raddr & 0xf000;
#else
#error Unknown page size
#endif

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
		       vsid, orig_pte->eaddr);
		WARN_ON(true);
		return -EINVAL;
	}

	vsid = map->host_vsid;
	va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);

	if (!orig_pte->may_write)
		rflags |= HPTE_R_PP;
	else
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;

	hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);

map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (ppc_md.hpte_remove(hpteg) < 0)
			return -1;

	ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags,
				 MMU_PAGE_4K, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
		struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];

		dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n",
			    ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
			    (rflags & HPTE_R_N) ? '-' : 'x',
			    orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);

		/* The ppc_md code may give us a secondary entry even though we
		   asked for a primary. Fix up. */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		pte->slot = hpteg + (ret & 7);
		pte->host_va = va;
		pte->pte = *orig_pte;
		pte->pfn = hpaddr >> PAGE_SHIFT;
	}

	return 0;
}
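
/*
 * Allocate a new host VSID for a guest VSID. Colliding entries alternate
 * between the primary and the mirrored hash slot; when the per-VM VSID
 * range is exhausted, all shadow state is flushed and allocation starts
 * over from vsid_first.
 */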
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	/* We might get collisions that trap in preceding order, so let's
	   map them differently */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n",
		    sid_map_mask, gvsid, map->host_vsid);

	return map;
}
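
/*
 * Pick a shadow SLB slot for the given ESID: reuse an existing entry for
 * the same ESID, recycle an invalidated one, or take the next free slot,
 * flushing all segments first if the SLB is full. Entry 0 is never handed
 * out for guest segments.
 */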
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	if (!to_svcpu(vcpu)->slb_max)
		to_svcpu(vcpu)->slb_max = 1;

	/* Are we overwriting? */
	for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
		if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
			return i;
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval > 0)
		return found_inval;

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = to_svcpu(vcpu)->slb_max;
	to_svcpu(vcpu)->slb_max++;

	return r;
}
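
/*
 * Build and install a shadow SLB entry for a guest effective address:
 * translate the guest ESID to a guest VSID, map that to a host VSID
 * (creating the mapping if needed), and program the shadow SLB slot.
 */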
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		to_svcpu(vcpu)->slb[slb_index].esid = 0;
		return -ENOENT;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

	to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
	to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;

	dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);

	return 0;
}
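
/*
 * Reset the shadow SLB: clear entry 0 and mark all other entries as free
 * by pulling slb_max back to 1.
 */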
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	to_svcpu(vcpu)->slb_max = 1;
	to_svcpu(vcpu)->slb[0].esid = 0;
}
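
/* Release all shadow PTEs and the host MMU context held by this vcpu. */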
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
	__destroy_context(to_book3s(vcpu)->context_id);
}
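
/*
 * Allocate a host MMU context for this vcpu and carve out the range of
 * host VSIDs that create_sid_map() may hand out.
 */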
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id = err;

	vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1;
	vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
	vcpu3s->vsid_next = vcpu3s->vsid_first;

	return 0;
}