/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "timing.h"
#include "e500_mmu_host.h"
#include "trace_booke.h"

#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
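
/*
 * Host TLB1 is carved up as the code below implies: entries
 * 0..tlbcam_index-1 hold the host's own CAM mappings, the entry at
 * tlbcam_index is reserved for the guest's magic page, and the rest
 * are shadow slots.  Shadow slot sesel lives at the top of TLB1, so
 * to_htlb1_esel() counts down from the last hardware entry.
 */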

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}
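
/*
 * Worked example (hypothetical numbers): with 64 hardware TLB1 entries
 * and tlbcam_index == 4, the usable shadow pool is 64 - 4 - 1 = 59 slots.
 */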

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate
		 * guest supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}
	mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
	return mas3;
}
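
/*
 * The shift in e500_shadow_mas3_attrib() works because each MAS3 user
 * permission bit sits one position above its supervisor counterpart:
 * SR 0x01/UR 0x02, SW 0x04/UW 0x08, SX 0x10/UX 0x20.
 */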

static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}
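
/*
 * e500_shadow_mas2_attrib() forces the M (memory coherence required)
 * bit on SMP, whatever the guest put in MAS2, so that guest memory
 * stays coherent when it is touched from more than one CPU.
 */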

/*
 * Write a shadow TLB entry into the host TLB.
 */
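
/*
 * This must run with interrupts disabled: the MAS registers are
 * per-CPU state, and an interrupt that touches the TLB between the
 * mtspr sequence and the tlbwe would corrupt the entry being written.
 */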
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_MAS8, stlbe->mas8);
#endif
	asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
	/* Must clear mas8 for other host tlbwe's */
	mtspr(SPRN_MAS8, 0);
	isync();
#endif
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
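
/*
 * On a search miss, tlbsx loads MAS0 from the MAS4 defaults with the
 * hardware's next-victim entry select, so the value read back already
 * names a suitable entry to replace.
 */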
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	local_irq_restore(flags);

	return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)));
	}
}
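
/*
 * Note: write_stlbe() below runs under preempt_disable() because on
 * e500v2 the shadow TID comes from a per-CPU shadow-PID allocator;
 * migrating CPUs between computing the stid and writing the entry
 * would pair the wrong sid with the wrong CPU's TLB.
 */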

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}

#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
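
/*
 * The magic (paravirt shared) page goes into the host TLB1 slot at
 * tlbcam_index, i.e. the one entry that tlb1_max_shadow_size() keeps
 * out of the shadow pool.
 */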

void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}
#endif
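
/*
 * Invalidate whatever the host has backing guest entry (tlbsel, esel):
 * TLB1 entries tracked in the g2h bitmap, TLB1 entries that were
 * broken into 4K TLB0 mappings, and finally the common case of a
 * single host entry per shadow pid.
 */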
void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
			 int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

	/* Don't bother with unmapped entries */
	if (!(ref->flags & E500_TLB_VALID)) {
		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
		     "%s: flags %x\n", __func__, ref->flags);
		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
		int hw_tlb_indx;
		unsigned long flags;

		local_irq_save(flags);
		while (tmp) {
			/* tmp & -tmp isolates the lowest set bit: the
			 * next host TLB1 index to invalidate. */
			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
			mtspr(SPRN_MAS0,
			      MAS0_TLBSEL(1) |
			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
			mtspr(SPRN_MAS1, 0);
			asm volatile("tlbwe");
			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
			tmp &= tmp - 1;
		}
		mb();
		vcpu_e500->g2h_tlb1_map[esel] = 0;
		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
		local_irq_restore(flags);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
		/*
		 * TLB1 entry is backed by 4k pages. This should happen
		 * rarely and is not worth optimizing. Invalidate everything.
		 */
		kvmppc_e500_tlbil_all(vcpu_e500);
		ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
	}

	/* Already invalidated in between */
	if (!(ref->flags & E500_TLB_VALID))
		return;

	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

	/* Mark the TLB as not backed by the host anymore */
	ref->flags &= ~E500_TLB_VALID;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}
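
/*
 * Record the host pfn backing a guest entry.  Accessed/dirty state is
 * set eagerly: once the shadow entry is live the guest can touch the
 * page without faulting again, so this is the last reliable point to
 * record the access.
 */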

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 pfn_t pfn)
{
	ref->pfn = pfn;
	ref->flags |= E500_TLB_VALID;

	/* Mark the page accessed */
	kvm_set_pfn_accessed(pfn);

	if (tlbe_is_writable(gtlbe))
		kvm_set_pfn_dirty(pfn);
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		/* FIXME: don't log bogus pfn for TLB1 */
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
}

static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	if (vcpu_e500->g2h_tlb1_map)
		memset(vcpu_e500->g2h_tlb1_map, 0,
		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
	if (vcpu_e500->h2g_tlb1_rmap)
		memset(vcpu_e500->h2g_tlb1_rmap, 0,
		       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel;
	int i;

	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
			struct tlbe_ref *ref =
				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
			kvmppc_e500_ref_release(ref);
		}
	}
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_e500_tlbil_all(vcpu_e500);
	clear_tlb_privs(vcpu_e500);
	clear_tlb1_bitmap(vcpu_e500);
}
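
/*
 * Fill a shadow TLB entry from a guest one.  IPROT is never set, so
 * shadow entries are always evictable; under CONFIG_KVM_BOOKE_HV,
 * MAS8 tags the entry as guest space (TGS) with the VM's lpid.
 */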

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
	struct kvm_vcpu *vcpu,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	pfn_t pfn = ref->pfn;
	u32 pr = vcpu->arch.shared->msr & MSR_PR;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) |
		      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
	stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}

static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn = 0; /* silence GCC warning */
	unsigned long hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;
	int ret = 0;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu_e500->vcpu.kvm;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
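
	/*
	 * This snapshot pairs with mmu_notifier_retry() further down:
	 * if an invalidation runs between reading mmu_notifier_seq and
	 * taking kvm->mmu_lock, the pfn looked up below may be stale
	 * and the whole mapping attempt is retried via -EAGAIN.
	 */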

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */
			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */
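
			/*
			 * Worked example (hypothetical numbers): for a
			 * 1 MiB guest mapping, tsize 10 gives
			 * tsize_pages = 256, so that size is kept only
			 * if gfn and pfn agree in their low 8 bits and
			 * the 256-page window fits in [start, end);
			 * otherwise we retry at 256 KiB, 64 KiB, ...
			 * down to 4 KiB.
			 */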
			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1UL << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   (vma->vm_flags & VM_HUGETLB)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both host
			 * and guest mapping.  psize is in bytes, while TSIZE
			 * encodes log2 of the size in KB, hence the -10.
			 */
			tsize = min(__ilog2(psize) - 10, tsize);

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		unsigned long tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);

		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_noslot_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
			       (long)gfn);
			return -EINVAL;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		ret = -EAGAIN;
		goto out;
	}

	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
				ref, gvaddr, stlbe);

	/* Clear i-cache for new pages */
	kvmppc_mmu_flush_icache(pfn);

out:
	spin_unlock(&kvm->mmu_lock);

	/* Drop refcount on page, so that mmu notifiers can clear it */
	kvm_release_pfn_clean(pfn);

	return ret;
}

/* XXX only map the one-to-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
				struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;
	int stlbsel = 0;
	int sesel = 0;
	int r;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
				   get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
				   gtlbe, 0, stlbe, ref);
	if (r)
		return r;

	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

	return 0;
}
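
/*
 * Pick a host TLB1 slot for a guest TLB1 mapping.  Allocation is a
 * simple round robin over the shadow pool (host_tlb1_nv); if the slot
 * is still owned by another guest entry, that entry's bit is cleared
 * from its g2h bitmap first.  The h2g rmap stores esel + 1 so that
 * zero can mean "slot unused".
 */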
static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
				     struct tlbe_ref *ref,
				     int esel)
{
	unsigned int sesel = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
	}

	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;

	WARN_ON(!(ref->flags & E500_TLB_VALID));

	return sesel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* For both one-to-one and one-to-many */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
	int sesel;
	int r;

	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
				   ref);
	if (r)
		return r;

	/* Use TLB0 when we can only map with a 4K page */
	if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
		vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
		write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
		return 0;
	}

	/* Otherwise map into TLB1 */
	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

	return 0;
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
		    unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		/* Triggers after clear_tlb_privs() or on initial mapping */
		if (!(priv->ref.flags & E500_TLB_VALID)) {
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
		} else {
			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
						&priv->ref, eaddr, &stlbe);
			write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
		}
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;

		kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
				     esel);
		break;
	}

	default:
		BUG();
		break;
	}
}

/************* MMU Notifiers *************/
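
/*
 * These hooks are deliberately coarse: no reverse map is kept from
 * host pages to shadow TLB entries, so the only safe response to an
 * unmap is to flush all shadow mappings.
 */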

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere.  This is slow, but
	 * we are 100% sure that we catch the page to be unmapped.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
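
	/* TLB1 on e500 is fully associative: a single set, one way per entry */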
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		return -ENOMEM;

	return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->h2g_tlb1_rmap);
}