44x_tlb.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_44x.h>
#include "timing.h"

#include "44x_tlb.h"
#include "trace.h"

#ifndef PPC44x_TLBE_SIZE
#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
#endif

#define PAGE_SIZE_4K (1<<12)
#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))
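
/* Groupings of the word2 attribute and permission bits, used when building
 * shadow TLB attributes below. */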
#define PPC44x_TLB_UATTR_MASK \
        (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)

#ifdef DEBUG
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        struct kvmppc_44x_tlbe *tlbe;
        int i;

        printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
        printk("| %2s | %3s | %8s | %8s | %8s |\n",
                        "nr", "tid", "word0", "word1", "word2");

        for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
                tlbe = &vcpu_44x->guest_tlb[i];
                if (tlbe->word0 & PPC44x_TLB_VALID)
                        printk(" G%2d | %02X | %08X | %08X | %08X |\n",
                               i, tlbe->tid, tlbe->word0, tlbe->word1,
                               tlbe->word2);
        }
}
#endif

static inline void kvmppc_44x_tlbie(unsigned int index)
{
        /* 0 <= index < 64, so the V bit is clear and we can use the index as
         * word0. */
        asm volatile(
                "tlbwe %[index], %[index], 0\n"
        :
        : [index] "r"(index)
        );
}
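
/* Read hardware TLB entry 'index' into *tlbe. The TID is recovered from
 * MMUCR[STID], which reading word 0 with tlbre updates. */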
static inline void kvmppc_44x_tlbre(unsigned int index,
                                    struct kvmppc_44x_tlbe *tlbe)
{
        asm volatile(
                "tlbre %[word0], %[index], 0\n"
                "mfspr %[tid], %[sprn_mmucr]\n"
                "andi. %[tid], %[tid], 0xff\n"
                "tlbre %[word1], %[index], 1\n"
                "tlbre %[word2], %[index], 2\n"
        : [word0] "=r"(tlbe->word0),
          [word1] "=r"(tlbe->word1),
          [word2] "=r"(tlbe->word2),
          [tid]   "=r"(tlbe->tid)
        : [index] "r"(index),
          [sprn_mmucr] "i"(SPRN_MMUCR)
        : "cc"
        );
}
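
/* Write *stlbe into hardware TLB entry 'index'. The entry's TID is taken from
 * MMUCR[STID], so it is merged into MMUCR before the tlbwe sequence. */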
static inline void kvmppc_44x_tlbwe(unsigned int index,
                                    struct kvmppc_44x_tlbe *stlbe)
{
        unsigned long tmp;

        asm volatile(
                "mfspr %[tmp], %[sprn_mmucr]\n"
                "rlwimi %[tmp], %[tid], 0, 0xff\n"
                "mtspr %[sprn_mmucr], %[tmp]\n"
                "tlbwe %[word0], %[index], 0\n"
                "tlbwe %[word1], %[index], 1\n"
                "tlbwe %[word2], %[index], 2\n"
        : [tmp]   "=&r"(tmp)
        : [word0] "r"(stlbe->word0),
          [word1] "r"(stlbe->word1),
          [word2] "r"(stlbe->word2),
          [tid]   "r"(stlbe->tid),
          [index] "r"(index),
          [sprn_mmucr] "i"(SPRN_MMUCR)
        );
}

static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
        /* We only care about the guest's permission and user bits. */
        attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;

        if (!usermode) {
                /* Guest is in supervisor mode, so we need to translate guest
                 * supervisor permissions into user permissions. */
                attrib &= ~PPC44x_TLB_USER_PERM_MASK;
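                /* SX/SW/SR sit three bits below UX/UW/UR in word2, so a left
                 * shift by 3 copies the supervisor permissions into the user
                 * permission bits. */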
                attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
        }

        /* Make sure host can always access this memory. */
        attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

        /* WIMGE = 0b00100 */
        attrib |= PPC44x_TLB_M;

        return attrib;
}

/* Load shadow TLB back into hardware. */
void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        int i;

        for (i = 0; i <= tlb_44x_hwater; i++) {
                struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

                if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
                        kvmppc_44x_tlbwe(i, stlbe);
        }
}
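
/* Record that hardware TLB entry 'i' now holds a shadow (guest) mapping, so
 * kvmppc_44x_tlb_put() knows to read it back before invalidating it. */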
static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
                                         unsigned int i)
{
        vcpu_44x->shadow_tlb_mod[i] = 1;
}

/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        int i;

        for (i = 0; i <= tlb_44x_hwater; i++) {
                struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

                if (vcpu_44x->shadow_tlb_mod[i])
                        kvmppc_44x_tlbre(i, stlbe);

                if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
                        kvmppc_44x_tlbie(i);
        }
}

/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        int i;

        /* XXX Replace loop with fancy data structures. */
        for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
                struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
                unsigned int tid;

                if (eaddr < get_tlb_eaddr(tlbe))
                        continue;

                if (eaddr > get_tlb_end(tlbe))
                        continue;

                tid = get_tlb_tid(tlbe);
                if (tid && (tid != pid))
                        continue;

                if (!get_tlb_v(tlbe))
                        continue;

                if (get_tlb_ts(tlbe) != as)
                        continue;

                return i;
        }

        return -1;
}
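
/* Translate a guest effective address to a guest physical address using the
 * given guest TLB entry. */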
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                       gva_t eaddr)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
        unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;

        return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
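
/* Find the guest TLB entry (if any) mapping 'eaddr' for instruction or data
 * accesses, using the current guest PID and the address space selected by
 * MSR[IS] or MSR[DS] respectively. */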
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_IS);

        return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_DS);

        return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}
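
/* No special bookkeeping is needed on 440 when a TLB miss is reflected to the
 * guest, so these hooks are empty. */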
void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
                                      unsigned int stlb_index)
{
        struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];

        if (!ref->page)
                return;

        /* Discard from the TLB. */
        /* Note: we could actually invalidate a host mapping, if the host overwrote
         * this TLB entry since we inserted a guest mapping. */
        kvmppc_44x_tlbie(stlb_index);

        /* Now release the page. */
        if (ref->writeable)
                kvm_release_page_dirty(ref->page);
        else
                kvm_release_page_clean(ref->page);

        ref->page = NULL;

        /* XXX set tlb_44x_index to stlb_index? */

        trace_kvm_stlb_inval(stlb_index);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        int i;

        for (i = 0; i <= tlb_44x_hwater; i++)
                kvmppc_44x_shadow_release(vcpu_44x, i);
}

/**
 * kvmppc_mmu_map -- create a host mapping for guest memory
 *
 * If the guest wanted a larger page than the host supports, only the first
 * host page is mapped here and the rest are demand faulted.
 *
 * If the guest wanted a smaller page than the host page size, we map only the
 * guest-size page (i.e. not a full host page mapping).
 *
 * Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB.
 */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                    unsigned int gtlb_index)
{
        struct kvmppc_44x_tlbe stlbe;
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
        struct kvmppc_44x_shadow_ref *ref;
        struct page *new_page;
        hpa_t hpaddr;
        gfn_t gfn;
        u32 asid = gtlbe->tid;
        u32 flags = gtlbe->word2;
        u32 max_bytes = get_tlb_bytes(gtlbe);
        unsigned int victim;

        /* Select TLB entry to clobber. Indirectly guard against races with the TLB
         * miss handler by disabling interrupts. */
        local_irq_disable();
        victim = ++tlb_44x_index;
        if (victim > tlb_44x_hwater)
                victim = 0;
        tlb_44x_index = victim;
        local_irq_enable();

        /* Get reference to new page. */
        gfn = gpaddr >> PAGE_SHIFT;
        new_page = gfn_to_page(vcpu->kvm, gfn);
        if (is_error_page(new_page)) {
                printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
                kvm_release_page_clean(new_page);
                return;
        }
        hpaddr = page_to_phys(new_page);

        /* Invalidate any previous shadow mappings. */
        kvmppc_44x_shadow_release(vcpu_44x, victim);

        /* XXX Make sure (va, size) doesn't overlap any other
         * entries. 440x6 user manual says the result would be
         * "undefined." */

        /* XXX what about AS? */

        /* Force TS=1 for all guest mappings. */
        stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;

        if (max_bytes >= PAGE_SIZE) {
                /* Guest mapping is larger than or equal to host page size. We can use
                 * a "native" host mapping. */
                stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
        } else {
                /* Guest mapping is smaller than host page size. We must restrict the
                 * size of the mapping to be at most the smaller of the two, but for
                 * simplicity we fall back to a 4K mapping (this is probably what the
                 * guest is using anyways). */
                stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;

                /* 'hpaddr' is a host page, which is larger than the mapping we're
                 * inserting here. To compensate, we must add the in-page offset to the
                 * sub-page. */
                hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
        }
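
        /* word1 holds the real page number: the RPN in the upper 22 bits plus
         * the 4-bit ERPN (bits 32-35 of the 36-bit real address) in the low
         * nibble. */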
        stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
        stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
                                                   vcpu->arch.msr & MSR_PR);
        stlbe.tid = !(asid & 0xff);

        /* Keep track of the reference so we can properly release it later. */
        ref = &vcpu_44x->shadow_refs[victim];
        ref->page = new_page;
        ref->gtlb_index = gtlb_index;
        ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
        ref->tid = stlbe.tid;

        /* Insert shadow mapping into hardware TLB. */
        kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
        kvmppc_44x_tlbwe(victim, &stlbe);
        trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1,
                             stlbe.word2);
}

/* For a particular guest TLB entry, invalidate the corresponding host TLB
 * mappings and release the host pages. */
static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
                                  unsigned int gtlb_index)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        int i;

        for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
                struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];

                if (ref->gtlb_index == gtlb_index)
                        kvmppc_44x_shadow_release(vcpu_44x, i);
        }
}
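
/* Guest kernel mode runs with host shadow PID 1 and guest user mode with
 * shadow PID 0, so shadow entries for guest kernel mappings (TID=1) never
 * match while the guest is in user mode. */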
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
        vcpu->arch.shadow_pid = !usermode;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        int i;

        if (unlikely(vcpu->arch.pid == new_pid))
                return;

        vcpu->arch.pid = new_pid;

        /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
         * can't access guest kernel mappings (TID=1). When we switch to a new
         * guest PID, which will also use host PID=0, we must discard the old guest
         * userspace mappings. */
        for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
                struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];

                if (ref->tid == 0)
                        kvmppc_44x_shadow_release(vcpu_44x, i);
        }
}
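
/* Decide whether a guest TLB entry can be preloaded into the shadow TLB right
 * away: it must be valid, match the current guest address space, and map
 * guest RAM. */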
static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
                             const struct kvmppc_44x_tlbe *tlbe)
{
        gpa_t gpa;

        if (!get_tlb_v(tlbe))
                return 0;

        /* Does it match current guest AS? */
        /* XXX what about IS != DS? */
        if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
                return 0;

        gpa = get_tlb_raddr(tlbe);
        if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
                /* Mapping is not for RAM. */
                return 0;

        return 1;
}
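
/* Emulate a guest tlbwe: rA holds the guest TLB index, rS the new value, and
 * 'ws' selects which TLB word (PAGEID, XLAT or ATTRIB) is written. */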
int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        struct kvmppc_44x_tlbe *tlbe;
        unsigned int gtlb_index;

        gtlb_index = vcpu->arch.gpr[ra];
        if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
                printk("%s: index %d\n", __func__, gtlb_index);
                kvmppc_dump_vcpu(vcpu);
                return EMULATE_FAIL;
        }

        tlbe = &vcpu_44x->guest_tlb[gtlb_index];

        /* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
        if (tlbe->word0 & PPC44x_TLB_VALID)
                kvmppc_44x_invalidate(vcpu, gtlb_index);

        switch (ws) {
        case PPC44x_TLB_PAGEID:
                tlbe->tid = get_mmucr_stid(vcpu);
                tlbe->word0 = vcpu->arch.gpr[rs];
                break;

        case PPC44x_TLB_XLAT:
                tlbe->word1 = vcpu->arch.gpr[rs];
                break;

        case PPC44x_TLB_ATTRIB:
                tlbe->word2 = vcpu->arch.gpr[rs];
                break;

        default:
                return EMULATE_FAIL;
        }

        if (tlbe_is_host_safe(vcpu, tlbe)) {
                gva_t eaddr;
                gpa_t gpaddr;
                u32 bytes;

                eaddr = get_tlb_eaddr(tlbe);
                gpaddr = get_tlb_raddr(tlbe);

                /* Use the advertised page size to mask effective and real addrs. */
                bytes = get_tlb_bytes(tlbe);
                eaddr &= ~(bytes - 1);
                gpaddr &= ~(bytes - 1);

                kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
        }

        trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
                             tlbe->word2);
        kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
        return EMULATE_DONE;
}

int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
{
        u32 ea;
        int gtlb_index;
        unsigned int as = get_mmucr_sts(vcpu);
        unsigned int pid = get_mmucr_stid(vcpu);

        ea = vcpu->arch.gpr[rb];
        if (ra)
                ea += vcpu->arch.gpr[ra];

        gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
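
        /* With the record bit set (tlbsx.), CR0[EQ] (mask 0x20000000) reports
         * whether a matching entry was found. */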
        if (rc) {
                if (gtlb_index < 0)
                        vcpu->arch.cr &= ~0x20000000;
                else
                        vcpu->arch.cr |= 0x20000000;
        }

        vcpu->arch.gpr[rt] = gtlb_index;

        kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
        return EMULATE_DONE;
}