/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"
#include "e500_mmu_host.h"

#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

static inline unsigned int tlb1_max_shadow_size(void)
{
        /* reserve one entry for magic page */
        return host_tlb_params[1].entries - tlbcam_index - 1;
}

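/*
 * Derive the shadow (host) MAS3/MAS2 attribute bits from the guest's
 * values.  Without CONFIG_KVM_BOOKE_HV the guest runs in host user mode,
 * so guest supervisor permissions are folded into the user permission
 * bits, and the host is granted full supervisor access.  The MAS2
 * variant additionally forces the M (coherency) bit on SMP hosts.
 */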
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
        /* Mask off reserved bits. */
        mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
        if (!usermode) {
                /* Guest is in supervisor mode,
                 * so we need to translate guest
                 * supervisor permissions into user permissions. */
                mas3 &= ~E500_TLB_USER_PERM_MASK;
                mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
        }
        mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
        return mas3;
}

static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
        return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
        return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
                                     uint32_t mas0)
{
        unsigned long flags;

        local_irq_save(flags);
        mtspr(SPRN_MAS0, mas0);
        mtspr(SPRN_MAS1, stlbe->mas1);
        mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
        mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
        mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_MAS8, stlbe->mas8);
#endif
        asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
        /* Must clear mas8 for other host tlbwe's */
        mtspr(SPRN_MAS8, 0);
        isync();
#endif
        local_irq_restore(flags);

        trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
                                      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
        unsigned long flags;
        u32 mas0;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, 0);
        asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
        mas0 = mfspr(SPRN_MAS0);
        local_irq_restore(flags);

        return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
        u32 mas0;

        if (tlbsel == 0) {
                mas0 = get_host_mas0(stlbe->mas2);
                __write_host_tlbe(stlbe, mas0);
        } else {
                __write_host_tlbe(stlbe,
                                  MAS0_TLBSEL(1) |
                                  MAS0_ESEL(to_htlb1_esel(sesel)));
        }
}

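/*
 * Fill in the shadow PID and install the entry in the host TLB.  The
 * stid lookup and the TLB write happen under preempt_disable() so the
 * two cannot be separated by a migration to another CPU.
 */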
/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                        struct kvm_book3e_206_tlb_entry *gtlbe,
                        struct kvm_book3e_206_tlb_entry *stlbe,
                        int stlbsel, int sesel)
{
        int stid;

        preempt_disable();
        stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

        stlbe->mas1 |= MAS1_TID(stid);
        write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
        preempt_enable();
}

#ifdef CONFIG_KVM_E500V2
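/*
 * Map the magic (paravirt shared) page with a dedicated TLB1 entry at
 * tlbcam_index -- the slot that tlb1_max_shadow_size() reserves for it.
 */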
/* XXX should be a hook in the gva2hpa translation */
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry magic;
        ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
        unsigned int stid;
        pfn_t pfn;

        pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
        get_page(pfn_to_page(pfn));

        preempt_disable();
        stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

        magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
                     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
        magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
        magic.mas8 = 0;

        __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
        preempt_enable();
}
#endif

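/*
 * Invalidate any host TLB entries backing the guest entry (tlbsel, esel).
 * A guest TLB1 entry may be backed by a bitmap of host TLB1 slots
 * (E500_TLB_BITMAP) or by host TLB0 entries (E500_TLB_TLB0); any other
 * valid entry is backed by at most one host entry per shadow PID.
 */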
void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
                         int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

        /* Don't bother with unmapped entries */
        if (!(ref->flags & E500_TLB_VALID))
                return;

        if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
                u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
                int hw_tlb_indx;
                unsigned long flags;

                local_irq_save(flags);
                while (tmp) {
                        hw_tlb_indx = __ilog2_u64(tmp & -tmp);
                        mtspr(SPRN_MAS0,
                              MAS0_TLBSEL(1) |
                              MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
                        mtspr(SPRN_MAS1, 0);
                        asm volatile("tlbwe");
                        vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
                        tmp &= tmp - 1;
                }
                mb();
                vcpu_e500->g2h_tlb1_map[esel] = 0;
                ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
                local_irq_restore(flags);
        }

        if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
                /*
                 * TLB1 entry is backed by 4k pages. This should happen
                 * rarely and is not worth optimizing. Invalidate everything.
                 */
                kvmppc_e500_tlbil_all(vcpu_e500);
                ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
        }

        /* Already invalidated in between */
        if (!(ref->flags & E500_TLB_VALID))
                return;

        /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
        kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

        /* Mark the TLB as not backed by the host anymore */
        ref->flags &= ~E500_TLB_VALID;
}

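/*
 * A tlbe_ref records the host pfn backing a shadow mapping plus
 * E500_TLB_* state flags.  Writable mappings are marked dirty already
 * at setup time, since later writes through the host TLB entry are not
 * tracked.
 */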
static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
        return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
                                         struct kvm_book3e_206_tlb_entry *gtlbe,
                                         pfn_t pfn)
{
        ref->pfn = pfn;
        ref->flags = E500_TLB_VALID;

        if (tlbe_is_writable(gtlbe))
                kvm_set_pfn_dirty(pfn);
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
        if (ref->flags & E500_TLB_VALID) {
                trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
                ref->flags = 0;
        }
}

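/*
 * Reset the shadow-TLB1 bookkeeping: the per-guest-entry bitmap of host
 * TLB1 slots (g2h) and the host-slot-to-guest-entry reverse map (h2g).
 * Used together with clear_tlb_refs() for a full shadow TLB flush.
 */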
static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        if (vcpu_e500->g2h_tlb1_map)
                memset(vcpu_e500->g2h_tlb1_map, 0,
                       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
        if (vcpu_e500->h2g_tlb1_rmap)
                memset(vcpu_e500->h2g_tlb1_rmap, 0,
                       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int tlbsel = 0;
        int i;

        for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
                struct tlbe_ref *ref =
                        &vcpu_e500->gtlb_priv[tlbsel][i].ref;
                kvmppc_e500_ref_release(ref);
        }
}

static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int stlbsel = 1;
        int i;

        kvmppc_e500_tlbil_all(vcpu_e500);

        for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
                struct tlbe_ref *ref =
                        &vcpu_e500->tlb_refs[stlbsel][i];
                kvmppc_e500_ref_release(ref);
        }

        clear_tlb_privs(vcpu_e500);
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        clear_tlb_refs(vcpu_e500);
        clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
        struct kvm_vcpu *vcpu,
        struct kvm_book3e_206_tlb_entry *gtlbe,
        int tsize, struct tlbe_ref *ref, u64 gvaddr,
        struct kvm_book3e_206_tlb_entry *stlbe)
{
        pfn_t pfn = ref->pfn;
        u32 pr = vcpu->arch.shared->msr & MSR_PR;

        BUG_ON(!(ref->flags & E500_TLB_VALID));

        /* Force IPROT=0 for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN) |
                      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
        stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                        e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
        stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}

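/*
 * Build the shadow mapping for one guest TLB entry: translate gfn to a
 * host pfn, choose the largest page size usable by both guest and host,
 * and fill in *stlbe.  Guest TLB1 entries may be backed by large pages
 * when the backing VMA is VM_PFNMAP or hugetlb; everything else is
 * mapped with 4K pages.  A tsize of N encodes a page size of (1K << N)
 * bytes, which the tsize_pages arithmetic below relies on.
 */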
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
        int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
        struct tlbe_ref *ref)
{
        struct kvm_memory_slot *slot;
        unsigned long pfn = 0; /* silence GCC warning */
        unsigned long hva;
        int pfnmap = 0;
        int tsize = BOOK3E_PAGESZ_4K;

        /*
         * Translate guest physical to true physical, acquiring
         * a page reference if it is normal, non-reserved memory.
         *
         * gfn_to_memslot() must succeed because otherwise we wouldn't
         * have gotten this far.  Eventually we should just pass the slot
         * pointer through from the first lookup.
         */
        slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
        hva = gfn_to_hva_memslot(slot, gfn);

        if (tlbsel == 1) {
                struct vm_area_struct *vma;
                down_read(&current->mm->mmap_sem);

                vma = find_vma(current->mm, hva);
                if (vma && hva >= vma->vm_start &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        /*
                         * This VMA is a physically contiguous region (e.g.
                         * /dev/mem) that bypasses normal Linux page
                         * management.  Find the overlap between the
                         * vma and the memslot.
                         */
                        unsigned long start, end;
                        unsigned long slot_start, slot_end;

                        pfnmap = 1;

                        start = vma->vm_pgoff;
                        end = start +
                              ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

                        pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

                        slot_start = pfn - (gfn - slot->base_gfn);
                        slot_end = slot_start + slot->npages;

                        if (start < slot_start)
                                start = slot_start;
                        if (end > slot_end)
                                end = slot_end;

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

                        /*
                         * Now find the largest tsize (up to what the guest
                         * requested) that will cover gfn, stay within the
                         * range, and for which gfn and pfn are mutually
                         * aligned.
                         */
                        for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
                                unsigned long gfn_start, gfn_end, tsize_pages;
                                tsize_pages = 1 << (tsize - 2);

                                gfn_start = gfn & ~(tsize_pages - 1);
                                gfn_end = gfn_start + tsize_pages;

                                if (gfn_start + pfn - gfn < start)
                                        continue;
                                if (gfn_end + pfn - gfn > end)
                                        continue;
                                if ((gfn & (tsize_pages - 1)) !=
                                    (pfn & (tsize_pages - 1)))
                                        continue;

                                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
                                pfn &= ~(tsize_pages - 1);
                                break;
                        }
                } else if (vma && hva >= vma->vm_start &&
                           (vma->vm_flags & VM_HUGETLB)) {
                        unsigned long psize = vma_kernel_pagesize(vma);

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * Take the largest page size that satisfies both host
                         * and guest mapping
                         */
                        tsize = min(__ilog2(psize) - 10, tsize);

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
                }

                up_read(&current->mm->mmap_sem);
        }

        if (likely(!pfnmap)) {
                unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
                pfn = gfn_to_pfn_memslot(slot, gfn);
                if (is_error_noslot_pfn(pfn)) {
                        printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
                               (long)gfn);
                        return -EINVAL;
                }

                /* Align guest and physical address to page map boundaries */
                pfn &= ~(tsize_pages - 1);
                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
        }

        /* Drop old ref and setup new one. */
        kvmppc_e500_ref_release(ref);
        kvmppc_e500_ref_setup(ref, gtlbe, pfn);

        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
                                ref, gvaddr, stlbe);

        /* Clear i-cache for new pages */
        kvmppc_mmu_flush_icache(pfn);

        /* Drop refcount on page, so that mmu notifiers can clear it */
        kvm_release_pfn_clean(pfn);

        return 0;
}

/* XXX only map the one-to-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
                                struct kvm_book3e_206_tlb_entry *stlbe)
{
        struct kvm_book3e_206_tlb_entry *gtlbe;
        struct tlbe_ref *ref;
        int stlbsel = 0;
        int sesel = 0;
        int r;

        gtlbe = get_entry(vcpu_e500, 0, esel);
        ref = &vcpu_e500->gtlb_priv[0][esel].ref;

        r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
                                   get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
                                   gtlbe, 0, stlbe, ref);
        if (r)
                return r;

        write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

        return 0;
}

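/*
 * Pick a host TLB1 slot for a shadow entry, round-robin via
 * host_tlb1_nv, and update the guest-to-host bitmap and host-to-guest
 * reverse map.  If the victim slot still belongs to another guest
 * entry, that entry's bitmap bit is cleared first.
 */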
static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
                                     struct tlbe_ref *ref,
                                     int esel)
{
        unsigned int sesel = vcpu_e500->host_tlb1_nv++;

        if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
                vcpu_e500->host_tlb1_nv = 0;

        vcpu_e500->tlb_refs[1][sesel] = *ref;
        vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
        vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
        if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
                unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
                vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
        }
        vcpu_e500->h2g_tlb1_rmap[sesel] = esel;

        return sesel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* For both one-to-one and one-to-many */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
                struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
        struct tlbe_ref ref;
        int sesel;
        int r;

        ref.flags = 0;
        r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
                                   &ref);
        if (r)
                return r;

        /* Use TLB0 when we can only map a page with 4k */
        if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
                vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
                write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
                return 0;
        }

        /* Otherwise map into TLB1 */
        sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
        write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

        return 0;
}

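/*
 * Called from the TLB miss paths to (re)establish the shadow mapping for
 * the guest entry selected by @index.  A TLB0 entry whose ref is still
 * valid only needs its stlbe rebuilt from the cached ref; everything
 * else goes through the full shadow-map path.
 */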
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                    unsigned int index)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe_priv *priv;
        struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
        int tlbsel = tlbsel_of(index);
        int esel = esel_of(index);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        switch (tlbsel) {
        case 0:
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

                /* Triggers after clear_tlb_refs or on initial mapping */
                if (!(priv->ref.flags & E500_TLB_VALID)) {
                        kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
                } else {
                        kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
                                                &priv->ref, eaddr, &stlbe);
                        write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
                }
                break;

        case 1: {
                gfn_t gfn = gpaddr >> PAGE_SHIFT;

                kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
                                     esel);
                break;
        }

        default:
                BUG();
                break;
        }
}

/************* MMU Notifiers *************/

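/*
 * No reverse map is kept from host virtual addresses to shadow TLB
 * entries, so the notifiers below simply flush the entire shadow TLB;
 * affected ranges get remapped on the next guest access.
 */
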
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        /*
         * Flush all shadow tlb entries everywhere. This is slow, but
         * we are 100% sure that we catch the page to be unmapped.
         */
        kvm_flush_remote_tlbs(kvm);

        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        /* kvm_unmap_hva flushes everything anyway */
        kvm_unmap_hva(kvm, start);

        return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        kvm_unmap_hva(kvm, hva);
}

/*****************************************/

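/*
 * Probe the host TLB geometry from TLB0CFG/TLB1CFG and allocate the
 * per-vcpu shadow bookkeeping (tlb_refs and the TLB1 reverse map).
 * TLB1 is fully associative: ways == entries, a single set.
 */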
int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
        host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

        /*
         * This should never happen on real e500 hardware, but is
         * architecturally possible -- e.g. in some weird nested
         * virtualization case.
         */
        if (host_tlb_params[0].entries == 0 ||
            host_tlb_params[1].entries == 0) {
                pr_err("%s: need to know host tlb size\n", __func__);
                return -ENODEV;
        }

        host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
                                  TLBnCFG_ASSOC_SHIFT;
        host_tlb_params[1].ways = host_tlb_params[1].entries;

        if (!is_power_of_2(host_tlb_params[0].entries) ||
            !is_power_of_2(host_tlb_params[0].ways) ||
            host_tlb_params[0].entries < host_tlb_params[0].ways ||
            host_tlb_params[0].ways == 0) {
                pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
                       __func__, host_tlb_params[0].entries,
                       host_tlb_params[0].ways);
                return -ENODEV;
        }

        host_tlb_params[0].sets =
                host_tlb_params[0].entries / host_tlb_params[0].ways;
        host_tlb_params[1].sets = 1;

        vcpu_e500->tlb_refs[0] =
                kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
                        GFP_KERNEL);
        if (!vcpu_e500->tlb_refs[0])
                goto err;

        vcpu_e500->tlb_refs[1] =
                kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
                        GFP_KERNEL);
        if (!vcpu_e500->tlb_refs[1])
                goto err;

        vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
                                           host_tlb_params[1].entries,
                                           GFP_KERNEL);
        if (!vcpu_e500->h2g_tlb1_rmap)
                goto err;

        return 0;

err:
        kfree(vcpu_e500->tlb_refs[0]);
        kfree(vcpu_e500->tlb_refs[1]);
        return -ENOMEM;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kfree(vcpu_e500->h2g_tlb1_rmap);
        kfree(vcpu_e500->tlb_refs[0]);
        kfree(vcpu_e500->tlb_refs[1]);
}