e500_tlb.c

/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"
#include "timing.h"

#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

static unsigned int tlb1_entry_num;

void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe *tlbe;
        int i, tlbsel;

        printk("| %8s | %8s | %8s | %8s | %8s |\n",
                        "nr", "mas1", "mas2", "mas3", "mas7");

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                printk("Guest TLB%d:\n", tlbsel);
                for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
                        tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
                        if (tlbe->mas1 & MAS1_VALID)
                                printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
                                        tlbsel, i, tlbe->mas1, tlbe->mas2,
                                        tlbe->mas3, tlbe->mas7);
                }
        }

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                printk("Shadow TLB%d:\n", tlbsel);
                for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
                        tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
                        if (tlbe->mas1 & MAS1_VALID)
                                printk(" S[%d][%3d] | %08X | %08X | %08X | %08X |\n",
                                        tlbsel, i, tlbe->mas1, tlbe->mas2,
                                        tlbe->mas3, tlbe->mas7);
                }
        }
}
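
/* Pick the next TLB0 victim way in round-robin order, wrapping the
 * next-victim counter at KVM_E500_TLB0_WAY_NUM. */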
static inline unsigned int tlb0_get_next_victim(
                struct kvmppc_vcpu_e500 *vcpu_e500)
{
        unsigned int victim;

        victim = vcpu_e500->guest_tlb_nv[0]++;
        if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
                vcpu_e500->guest_tlb_nv[0] = 0;

        return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
        /* reserve one entry for magic page */
        return tlb1_entry_num - tlbcam_index - 1;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
        return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
        /* Mask off reserved bits. */
        mas3 &= MAS3_ATTRIB_MASK;

        if (!usermode) {
                /* Guest is in supervisor mode,
                 * so we need to translate guest
                 * supervisor permissions into user permissions. */
                mas3 &= ~E500_TLB_USER_PERM_MASK;
                mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
        }

        return mas3 | E500_TLB_SUPER_PERM_MASK;
}

static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
        return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
        return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
{
        unsigned long flags;

        local_irq_save(flags);
        mtspr(SPRN_MAS0, mas0);
        mtspr(SPRN_MAS1, stlbe->mas1);
        mtspr(SPRN_MAS2, stlbe->mas2);
        mtspr(SPRN_MAS3, stlbe->mas3);
        mtspr(SPRN_MAS7, stlbe->mas7);
        asm volatile("isync; tlbwe" : : : "memory");
        local_irq_restore(flags);
}

static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

        if (tlbsel == 0) {
                __write_host_tlbe(stlbe,
                                MAS0_TLBSEL(0) |
                                MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
        } else {
                __write_host_tlbe(stlbe,
                                MAS0_TLBSEL(1) |
                                MAS0_ESEL(to_htlb1_esel(esel)));
        }
}
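
/* Map the vcpu's magic (shared) page into host TLB1 at the entry just above
 * the host TLBCAM entries (the slot reserved by tlb1_max_shadow_size()),
 * using the guest's magic_page_ea as the effective address. */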
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
        struct tlbe magic;
        ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
        pfn_t pfn;

        pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
        get_page(pfn_to_page(pfn));

        magic.mas1 = MAS1_VALID | MAS1_TS |
                     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
        magic.mas3 = (pfn << PAGE_SHIFT) |
                     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
        magic.mas7 = pfn >> (32 - PAGE_SHIFT);

        __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
}

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
        _tlbil_all();
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
                gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
        int i;

        /* XXX Replace loop with fancy data structures. */
        for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
                struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
                unsigned int tid;

                if (eaddr < get_tlb_eaddr(tlbe))
                        continue;

                if (eaddr > get_tlb_end(tlbe))
                        continue;

                tid = get_tlb_tid(tlbe);
                if (tid && (tid != pid))
                        continue;

                if (!get_tlb_v(tlbe))
                        continue;

                if (get_tlb_ts(tlbe) != as && as != -1)
                        continue;

                return i;
        }

        return -1;
}

static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
        unsigned long pfn;

        pfn = stlbe->mas3 >> PAGE_SHIFT;
        pfn |= stlbe->mas7 << (32 - PAGE_SHIFT);

        if (get_tlb_v(stlbe)) {
                if (tlbe_is_writable(stlbe))
                        kvm_release_pfn_dirty(pfn);
                else
                        kvm_release_pfn_clean(pfn);
        }
}

static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

        kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
        stlbe->mas1 = 0;
        trace_kvm_stlb_inval(index_of(tlbsel, esel));
}

static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
                gva_t eaddr, gva_t eend, u32 tid)
{
        unsigned int pid = tid & 0xff;
        unsigned int i;

        /* XXX Replace loop with fancy data structures. */
        for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) {
                struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
                unsigned int tid;

                if (!get_tlb_v(stlbe))
                        continue;

                if (eend < get_tlb_eaddr(stlbe))
                        continue;

                if (eaddr > get_tlb_end(stlbe))
                        continue;

                tid = get_tlb_tid(stlbe);
                if (tid && (tid != pid))
                        continue;

                kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
                write_host_tlbe(vcpu_e500, 1, i);
        }
}
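
/* Load the guest-visible MAS registers with the defaults a TLB miss would
 * take from MAS4 (victim entry, PID selection, page size), so the guest's
 * miss handler sees the values it expects. */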
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
                unsigned int eaddr, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int victim, pidsel, tsized;
        int tlbsel;

        /* since we only have two TLBs, only lower bit is used. */
        tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
        victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
        pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
        tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

        vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
        vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
                | MAS1_TID(vcpu_e500->pid[pidsel])
                | MAS1_TSIZE(tsized);
        vcpu_e500->mas2 = (eaddr & MAS2_EPN)
                | (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
        vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
        vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
                | (get_cur_pid(vcpu) << 16)
                | (as ? MAS6_SAS : 0);
        vcpu_e500->mas7 = 0;
}
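
/* Build the shadow (host) TLB entry for a guest mapping: translate the guest
 * physical frame to a host pfn, take a page reference, and fill in MAS1-MAS7.
 * For TLB1 entries backed by a VM_PFNMAP region, try to keep a page size
 * larger than 4K as long as gfn and pfn remain mutually aligned. */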
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
{
        struct kvm_memory_slot *slot;
        struct tlbe *stlbe;
        unsigned long pfn, hva;
        int pfnmap = 0;
        int tsize = BOOK3E_PAGESZ_4K;

        stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

        /*
         * Translate guest physical to true physical, acquiring
         * a page reference if it is normal, non-reserved memory.
         *
         * gfn_to_memslot() must succeed because otherwise we wouldn't
         * have gotten this far. Eventually we should just pass the slot
         * pointer through from the first lookup.
         */
        slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
        hva = gfn_to_hva_memslot(slot, gfn);

        if (tlbsel == 1) {
                struct vm_area_struct *vma;
                down_read(&current->mm->mmap_sem);

                vma = find_vma(current->mm, hva);
                if (vma && hva >= vma->vm_start &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        /*
                         * This VMA is a physically contiguous region (e.g.
                         * /dev/mem) that bypasses normal Linux page
                         * management. Find the overlap between the
                         * vma and the memslot.
                         */
                        unsigned long start, end;
                        unsigned long slot_start, slot_end;

                        pfnmap = 1;

                        start = vma->vm_pgoff;
                        end = start +
                              ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

                        pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

                        slot_start = pfn - (gfn - slot->base_gfn);
                        slot_end = slot_start + slot->npages;

                        if (start < slot_start)
                                start = slot_start;
                        if (end > slot_end)
                                end = slot_end;

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

                        /*
                         * Now find the largest tsize (up to what the guest
                         * requested) that will cover gfn, stay within the
                         * range, and for which gfn and pfn are mutually
                         * aligned.
                         */
                        for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
                                unsigned long gfn_start, gfn_end, tsize_pages;
                                tsize_pages = 1 << (tsize - 2);

                                gfn_start = gfn & ~(tsize_pages - 1);
                                gfn_end = gfn_start + tsize_pages;

                                if (gfn_start + pfn - gfn < start)
                                        continue;
                                if (gfn_end + pfn - gfn > end)
                                        continue;
                                if ((gfn & (tsize_pages - 1)) !=
                                    (pfn & (tsize_pages - 1)))
                                        continue;

                                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
                                pfn &= ~(tsize_pages - 1);
                                break;
                        }
                }

                up_read(&current->mm->mmap_sem);
        }

        if (likely(!pfnmap)) {
                pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
                if (is_error_pfn(pfn)) {
                        printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
                                        (long)gfn);
                        kvm_release_pfn_clean(pfn);
                        return;
                }
        }

        /* Drop reference to old page. */
        kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);

        /* Force TS=1 IPROT=0 for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(tsize)
                | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN)
                | e500_shadow_mas2_attrib(gtlbe->mas2,
                                vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
        stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
                | e500_shadow_mas3_attrib(gtlbe->mas3,
                                vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
        stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;

        trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
                             stlbe->mas3, stlbe->mas7);
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *gtlbe;

        gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

        kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
                        get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
                        gtlbe, tlbsel, esel);

        return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-one and one-to-many , for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
{
        unsigned int victim;

        victim = vcpu_e500->guest_tlb_nv[1]++;

        if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
                vcpu_e500->guest_tlb_nv[1] = 0;

        kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);

        return victim;
}

/* Invalidate all guest kernel mappings when enter usermode,
 * so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
        if (usermode) {
                struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
                int i;

                /* XXX Replace loop with fancy data structures. */
                for (i = 0; i < tlb1_max_shadow_size(); i++)
                        kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);

                _tlbil_all();
        }
}
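
/* Invalidate one guest TLB entry (and its shadow mappings), unless the
 * entry is protected by IPROT. */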
static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

        if (unlikely(get_tlb_iprot(gtlbe)))
                return -1;

        if (tlbsel == 1) {
                kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
                                get_tlb_end(gtlbe),
                                get_tlb_tid(gtlbe));
        } else {
                kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
        }

        gtlbe->mas1 = 0;

        return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
        int esel;

        if (value & MMUCSR0_TLB0FI)
                for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
        if (value & MMUCSR0_TLB1FI)
                for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

        _tlbil_all();

        return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int ia;
        int esel, tlbsel;
        gva_t ea;

        ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

        ia = (ea >> 2) & 0x1;

        /* since we only have two TLBs, only lower bit is used. */
        tlbsel = (ea >> 3) & 0x1;

        if (ia) {
                /* invalidate all entries */
                for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        } else {
                ea &= 0xfffff000;
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
                                get_cur_pid(vcpu), -1);
                if (esel >= 0)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        }

        _tlbil_all();

        return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel, esel;
        struct tlbe *gtlbe;

        tlbsel = get_tlb_tlbsel(vcpu_e500);
        esel = get_tlb_esel(vcpu_e500, tlbsel);

        gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
        vcpu_e500->mas0 &= ~MAS0_NV(~0);
        vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
        vcpu_e500->mas1 = gtlbe->mas1;
        vcpu_e500->mas2 = gtlbe->mas2;
        vcpu_e500->mas3 = gtlbe->mas3;
        vcpu_e500->mas7 = gtlbe->mas7;

        return EMULATE_DONE;
}
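
/* Emulate tlbsx: search both guest TLBs for the effective address in rb.
 * On a hit, load the MAS registers from the matching entry; on a miss,
 * load them with the MAS4 defaults. */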
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int as = !!get_cur_sas(vcpu_e500);
        unsigned int pid = get_cur_spid(vcpu_e500);
        int esel, tlbsel;
        struct tlbe *gtlbe = NULL;
        gva_t ea;

        ea = kvmppc_get_gpr(vcpu, rb);

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
                if (esel >= 0) {
                        gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
                        break;
                }
        }

        if (gtlbe) {
                vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
                        | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
                vcpu_e500->mas1 = gtlbe->mas1;
                vcpu_e500->mas2 = gtlbe->mas2;
                vcpu_e500->mas3 = gtlbe->mas3;
                vcpu_e500->mas7 = gtlbe->mas7;
        } else {
                int victim;

                /* since we only have two TLBs, only lower bit is used. */
                tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
                victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

                vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                        | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
                vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
                        | (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
                        | (vcpu_e500->mas4 & MAS4_TSIZED(~0));
                vcpu_e500->mas2 &= MAS2_EPN;
                vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
                vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
                vcpu_e500->mas7 = 0;
        }

        kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
        return EMULATE_DONE;
}
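
/* Emulate tlbwe: copy the MAS registers into the selected guest TLB entry,
 * then, if the entry is host-safe, refresh the corresponding shadow entry
 * and write it into the host TLB. */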
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        u64 eaddr;
        u64 raddr;
        u32 tid;
        struct tlbe *gtlbe;
        int tlbsel, esel, stlbsel, sesel;

        tlbsel = get_tlb_tlbsel(vcpu_e500);
        esel = get_tlb_esel(vcpu_e500, tlbsel);

        gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

        if (get_tlb_v(gtlbe) && tlbsel == 1) {
                eaddr = get_tlb_eaddr(gtlbe);
                tid = get_tlb_tid(gtlbe);
                kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
                                get_tlb_end(gtlbe), tid);
        }

        gtlbe->mas1 = vcpu_e500->mas1;
        gtlbe->mas2 = vcpu_e500->mas2;
        gtlbe->mas3 = vcpu_e500->mas3;
        gtlbe->mas7 = vcpu_e500->mas7;

        trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
                             gtlbe->mas3, gtlbe->mas7);

        /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
        if (tlbe_is_host_safe(vcpu, gtlbe)) {
                switch (tlbsel) {
                case 0:
                        /* TLB0 */
                        gtlbe->mas1 &= ~MAS1_TSIZE(~0);
                        gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

                        stlbsel = 0;
                        sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);

                        break;

                case 1:
                        /* TLB1 */
                        eaddr = get_tlb_eaddr(gtlbe);
                        raddr = get_tlb_raddr(gtlbe);

                        /* Create a 4KB mapping on the host.
                         * If the guest wanted a large page,
                         * only the first 4KB is mapped here and the rest
                         * are mapped on the fly. */
                        stlbsel = 1;
                        sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
                                        raddr >> PAGE_SHIFT, gtlbe);
                        break;

                default:
                        BUG();
                }
                write_host_tlbe(vcpu_e500, stlbsel, sesel);
        }

        kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
        return EMULATE_DONE;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
                        gva_t eaddr)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe *gtlbe =
                &vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
        u64 pgmask = get_tlb_bytes(gtlbe) - 1;

        return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel, i;

        for (tlbsel = 0; tlbsel < 2; tlbsel++)
                for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
                        kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);

        /* discard all guest mapping */
        _tlbil_all();
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                        unsigned int index)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel = tlbsel_of(index);
        int esel = esel_of(index);
        int stlbsel, sesel;

        switch (tlbsel) {
        case 0:
                stlbsel = 0;
                sesel = esel;
                break;

        case 1: {
                gfn_t gfn = gpaddr >> PAGE_SHIFT;
                struct tlbe *gtlbe
                        = &vcpu_e500->guest_tlb[tlbsel][esel];

                stlbsel = 1;
                sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
                break;
        }

        default:
                BUG();
                break;
        }
        write_host_tlbe(vcpu_e500, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
                                gva_t eaddr, unsigned int pid, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel, tlbsel;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
                if (esel >= 0)
                        return index_of(tlbsel, esel);
        }

        return -1;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
                vcpu->arch.pid = pid;
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        struct tlbe *tlbe;

        /* Insert large initial mapping for guest. */
        tlbe = &vcpu_e500->guest_tlb[1][0];
        tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
        tlbe->mas2 = 0;
        tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
        tlbe->mas7 = 0;

        /* 4K map for serial output. Used by kernel wrapper. */
        tlbe = &vcpu_e500->guest_tlb[1][1];
        tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
        tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
        tlbe->mas7 = 0;
}

int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

        vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
        vcpu_e500->guest_tlb[0] =
                kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
        if (vcpu_e500->guest_tlb[0] == NULL)
                goto err_out;

        vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
        vcpu_e500->shadow_tlb[0] =
                kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
        if (vcpu_e500->shadow_tlb[0] == NULL)
                goto err_out_guest0;

        vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
        vcpu_e500->guest_tlb[1] =
                kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
        if (vcpu_e500->guest_tlb[1] == NULL)
                goto err_out_shadow0;

        vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
        vcpu_e500->shadow_tlb[1] =
                kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
        if (vcpu_e500->shadow_tlb[1] == NULL)
                goto err_out_guest1;

        /* Init TLB configuration register */
        vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
        vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
        vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
        vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1];

        return 0;

err_out_guest1:
        kfree(vcpu_e500->guest_tlb[1]);
err_out_shadow0:
        kfree(vcpu_e500->shadow_tlb[0]);
err_out_guest0:
        kfree(vcpu_e500->guest_tlb[0]);
err_out:
        return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kfree(vcpu_e500->shadow_tlb[1]);
        kfree(vcpu_e500->guest_tlb[1]);
        kfree(vcpu_e500->shadow_tlb[0]);
        kfree(vcpu_e500->guest_tlb[0]);
}