44x_tlb.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_44x.h>
#include "timing.h"

#include "44x_tlb.h"
#ifndef PPC44x_TLBE_SIZE
#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
#endif

#define PAGE_SIZE_4K (1<<12)
#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))

#define PPC44x_TLB_UATTR_MASK \
	(PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
#ifdef DEBUG
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	/* vcpu_44x was used below without being declared; derive it here. */
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	int i;

	printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
	printk("| %2s | %3s | %8s | %8s | %8s |\n",
	       "nr", "tid", "word0", "word1", "word2");

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		tlbe = &vcpu_44x->guest_tlb[i];
		if (tlbe->word0 & PPC44x_TLB_VALID)
			printk(" G%2d | %02X | %08X | %08X | %08X |\n",
			       i, tlbe->tid, tlbe->word0, tlbe->word1,
			       tlbe->word2);
	}
}
#endif
static inline void kvmppc_44x_tlbie(unsigned int index)
{
	/* 0 <= index < 64, so the V bit is clear and we can use the index as
	 * word0. */
	asm volatile(
		"tlbwe %[index], %[index], 0\n"
	:
	: [index] "r"(index)
	);
}
static inline void kvmppc_44x_tlbre(unsigned int index,
                                    struct kvmppc_44x_tlbe *tlbe)
{
	asm volatile(
		"tlbre %[word0], %[index], 0\n"
		"mfspr %[tid], %[sprn_mmucr]\n"
		"andi. %[tid], %[tid], 0xff\n"
		"tlbre %[word1], %[index], 1\n"
		"tlbre %[word2], %[index], 2\n"
		: [word0] "=r"(tlbe->word0),
		  [word1] "=r"(tlbe->word1),
		  [word2] "=r"(tlbe->word2),
		  [tid] "=r"(tlbe->tid)
		: [index] "r"(index),
		  [sprn_mmucr] "i"(SPRN_MMUCR)
		: "cc"
	);
}
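/* Write one entry into the hardware TLB. On the 440 the translation ID for
 * tlbwe is taken from MMUCR[STID] (the low byte of MMUCR) rather than from an
 * operand, which is why the entry's TID is spliced into MMUCR before the
 * three tlbwe instructions run. */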
static inline void kvmppc_44x_tlbwe(unsigned int index,
                                    struct kvmppc_44x_tlbe *stlbe)
{
	unsigned long tmp;

	asm volatile(
		"mfspr %[tmp], %[sprn_mmucr]\n"
		"rlwimi %[tmp], %[tid], 0, 0xff\n"
		"mtspr %[sprn_mmucr], %[tmp]\n"
		"tlbwe %[word0], %[index], 0\n"
		"tlbwe %[word1], %[index], 1\n"
		"tlbwe %[word2], %[index], 2\n"
		: [tmp]   "=&r"(tmp)
		: [word0] "r"(stlbe->word0),
		  [word1] "r"(stlbe->word1),
		  [word2] "r"(stlbe->word2),
		  [tid]   "r"(stlbe->tid),
		  [index] "r"(index),
		  [sprn_mmucr] "i"(SPRN_MMUCR)
	);
}
static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* We only care about the guest's permission and user bits. */
	attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions. */
		attrib &= ~PPC44x_TLB_USER_PERM_MASK;
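		/* Each user permission bit sits three positions above its
		 * supervisor counterpart in word2, so shifting left by 3
		 * copies SX/SW/SR into UX/UW/UR. */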
		attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
	}

	/* Make sure host can always access this memory. */
	attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

	/* WIMGE = 0b00100 */
	attrib |= PPC44x_TLB_M;

	return attrib;
}
/* Load shadow TLB back into hardware. */
void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbwe(i, stlbe);
	}
}
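/* Mark a shadow entry as modified, so that kvmppc_44x_tlb_put() knows to read
 * the current contents back out of the hardware TLB before saving it. */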
static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
                                         unsigned int i)
{
	vcpu_44x->shadow_tlb_mod[i] = 1;
}
/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (vcpu_44x->shadow_tlb_mod[i])
			kvmppc_44x_tlbre(i, stlbe);

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbie(i);
	}
}
/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                       gva_t eaddr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}
void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
}
static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
                                      unsigned int stlb_index)
{
	struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];

	if (!ref->page)
		return;

	/* Discard from the TLB. */
	/* Note: we could actually invalidate a host mapping, if the host overwrote
	 * this TLB entry since we inserted a guest mapping. */
	kvmppc_44x_tlbie(stlb_index);

	/* Now release the page. */
	if (ref->writeable)
		kvm_release_page_dirty(ref->page);
	else
		kvm_release_page_clean(ref->page);

	ref->page = NULL;

	/* XXX set tlb_44x_index to stlb_index? */

	KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++)
		kvmppc_44x_shadow_release(vcpu_44x, i);
}
/**
 * kvmppc_mmu_map -- create a host mapping for guest memory
 *
 * If the guest wanted a larger page than the host supports, only the first
 * host page is mapped here and the rest are demand faulted.
 *
 * If the guest wanted a smaller page than the host page size, we map only the
 * guest-size page (i.e. not a full host page mapping).
 *
 * Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB.
 */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                    unsigned int gtlb_index)
{
	struct kvmppc_44x_tlbe stlbe;
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	struct kvmppc_44x_shadow_ref *ref;
	struct page *new_page;
	hpa_t hpaddr;
	gfn_t gfn;
	u32 asid = gtlbe->tid;
	u32 flags = gtlbe->word2;
	u32 max_bytes = get_tlb_bytes(gtlbe);
	unsigned int victim;

	/* Select TLB entry to clobber. Indirectly guard against races with the TLB
	 * miss handler by disabling interrupts. */
	local_irq_disable();
	victim = ++tlb_44x_index;
	if (victim > tlb_44x_hwater)
		victim = 0;
	tlb_44x_index = victim;
	local_irq_enable();
	/* Get reference to new page. */
	gfn = gpaddr >> PAGE_SHIFT;
	new_page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(new_page)) {
		/* gfn_t is 64 bits wide, so %lx would be wrong on 32-bit
		 * hosts; cast and print with %llx. */
		printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
		       (unsigned long long)gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Invalidate any previous shadow mappings. */
	kvmppc_44x_shadow_release(vcpu_44x, victim);

	/* XXX Make sure (va, size) doesn't overlap any other
	 * entries. 440x6 user manual says the result would be
	 * "undefined." */

	/* XXX what about AS? */

	/* Force TS=1 for all guest mappings. */
	stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;

	if (max_bytes >= PAGE_SIZE) {
		/* Guest mapping is larger than or equal to host page size. We can use
		 * a "native" host mapping. */
		stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
	} else {
		/* Guest mapping is smaller than host page size. We must restrict the
		 * size of the mapping to be at most the smaller of the two, but for
		 * simplicity we fall back to a 4K mapping (this is probably what the
		 * guest is using anyways). */
		stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;

		/* 'hpaddr' is a host page, which is larger than the mapping we're
		 * inserting here. To compensate, we must add the in-page offset to the
		 * sub-page. */
		hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
	}
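
	/* word1 packs the physical address: the high bits hold the real page
	 * number, and the low nibble carries ERPN, the extended physical
	 * address bits used by 440 parts with 36-bit addressing. */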
	stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
	stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
	                                           vcpu->arch.msr & MSR_PR);
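	/* Shadow TID mirrors the PID scheme described in kvmppc_set_pid():
	 * per-process guest mappings (nonzero guest TID) get shadow TID 0 so
	 * they match under any host PID, while guest-global (TID 0, i.e.
	 * guest kernel) mappings get shadow TID 1 and match only while
	 * shadow_pid is 1, i.e. guest supervisor mode. */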
	stlbe.tid = !(asid & 0xff);

	/* Keep track of the reference so we can properly release it later. */
	ref = &vcpu_44x->shadow_refs[victim];
	ref->page = new_page;
	ref->gtlb_index = gtlb_index;
	ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
	ref->tid = stlbe.tid;

	/* Insert shadow mapping into hardware TLB. */
	kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
	kvmppc_44x_tlbwe(victim, &stlbe);
	KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1,
	            stlbe.word2, handler);
}
/* For a particular guest TLB entry, invalidate the corresponding host TLB
 * mappings and release the host pages. */
static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
                                  unsigned int gtlb_index)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
		if (ref->gtlb_index == gtlb_index)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}
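/* The guest runs under host PID 1 in supervisor mode and host PID 0 in user
 * mode (see the TID discussion in kvmppc_set_pid() below), so a privilege
 * switch only needs to flip the shadow PID. */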
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	vcpu->arch.shadow_pid = !usermode;
}
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	if (unlikely(vcpu->arch.pid == new_pid))
		return;

	vcpu->arch.pid = new_pid;

	/* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
	 * can't access guest kernel mappings (TID=1). When we switch to a new
	 * guest PID, which will also use host PID=0, we must discard the old guest
	 * userspace mappings. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
		if (ref->tid == 0)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}
static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
                             const struct kvmppc_44x_tlbe *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
		return 0;

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}
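/* Emulate a guest tlbwe: update the guest-visible TLB array and, when the new
 * entry is valid, in the current address space, and backed by RAM, eagerly
 * install the shadow mapping rather than waiting for a fault. */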
int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	unsigned int gtlb_index;

	gtlb_index = vcpu->arch.gpr[ra];
	/* guest_tlb[] has KVM44x_GUEST_TLB_SIZE entries, so the index must be
	 * strictly less than the size; ">" alone would allow a one-past-the-end
	 * access. */
	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
		printk("%s: index %d\n", __func__, gtlb_index);
		kvmppc_dump_vcpu(vcpu);
		return EMULATE_FAIL;
	}

	tlbe = &vcpu_44x->guest_tlb[gtlb_index];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
	if (tlbe->word0 & PPC44x_TLB_VALID)
		kvmppc_44x_invalidate(vcpu, gtlb_index);

	switch (ws) {
	case PPC44x_TLB_PAGEID:
		tlbe->tid = get_mmucr_stid(vcpu);
		tlbe->word0 = vcpu->arch.gpr[rs];
		break;

	case PPC44x_TLB_XLAT:
		tlbe->word1 = vcpu->arch.gpr[rs];
		break;

	case PPC44x_TLB_ATTRIB:
		tlbe->word2 = vcpu->arch.gpr[rs];
		break;

	default:
		return EMULATE_FAIL;
	}

	if (tlbe_is_host_safe(vcpu, tlbe)) {
		gva_t eaddr;
		gpa_t gpaddr;
		u32 bytes;

		eaddr = get_tlb_eaddr(tlbe);
		gpaddr = get_tlb_raddr(tlbe);

		/* Use the advertised page size to mask effective and real addrs. */
		bytes = get_tlb_bytes(tlbe);
		eaddr &= ~(bytes - 1);
		gpaddr &= ~(bytes - 1);

		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
	}

	KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
	            tlbe->word1, tlbe->word2, handler);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}
int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
{
	u32 ea;
	int gtlb_index;
	unsigned int as = get_mmucr_sts(vcpu);
	unsigned int pid = get_mmucr_stid(vcpu);

	ea = vcpu->arch.gpr[rb];
	if (ra)
		ea += vcpu->arch.gpr[ra];

	gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
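
	/* For the record form (tlbsx.), reflect the search result in CR0:
	 * 0x20000000 is the EQ bit, set when a matching entry was found. */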
	if (rc) {
		if (gtlb_index < 0)
			vcpu->arch.cr &= ~0x20000000;
		else
			vcpu->arch.cr |= 0x20000000;
	}
	vcpu->arch.gpr[rt] = gtlb_index;

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}