kvm_tlb.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"

/* Use VZ EntryHi.EHINV to invalidate TLB entries */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);
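
/*
 * ASID helpers: return the host ASID currently assigned to this VCPU's
 * guest-kernel or guest-user address space on the current CPU, and the
 * host TLB index reserved for the commpage mapping.
 */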
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
}

inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.commpage_tlb;
}

/*
 * Dump all host TLB entries (EntryHi, EntryLo0/1, PageMask) to the kernel
 * log. Debug helper; runs with interrupts disabled and restores the
 * original EntryHi/PageMask before returning.
 */
void kvm_mips_dump_host_tlbs(void)
{
        unsigned long old_entryhi;
        unsigned long old_pagemask;
        struct kvm_mips_tlb tlb;
        unsigned long flags;
        int i;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        printk("HOST TLBs:\n");
        printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));

        for (i = 0; i < current_cpu_data.tlbsize; i++) {
                write_c0_index(i);
                mtc0_tlbw_hazard();

                tlb_read();
                tlbw_use_hazard();

                tlb.tlb_hi = read_c0_entryhi();
                tlb.tlb_lo0 = read_c0_entrylo0();
                tlb.tlb_lo1 = read_c0_entrylo1();
                tlb.tlb_mask = read_c0_pagemask();

                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        local_irq_restore(flags);
}
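
/* Dump the VCPU's guest (virtual) TLB array to the kernel log. Debug helper. */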
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;
        int i;

        printk("Guest TLBs:\n");
        printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
}
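
/* Dump this CPU's saved shadow copy of the host TLB for @vcpu. Debug helper. */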
void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
{
        int i;
        volatile struct kvm_mips_tlb tlb;

        printk("Shadow TLBs:\n");
        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
}
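
/*
 * Resolve the host pfn backing guest frame @gfn and cache it in
 * kvm->arch.guest_pmap[]. Does nothing if the gfn is already mapped;
 * panics if the pfn cannot be obtained.
 */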
static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn;

        if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
                return;

        pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

        if (kvm_mips_is_error_pfn(pfn)) {
                panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
        }

        kvm->arch.guest_pmap[gfn] = pfn;
        return;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
                                                    unsigned long gva)
{
        gfn_t gfn;
        uint32_t offset = gva & ~PAGE_MASK;
        struct kvm *kvm = vcpu->kvm;

        if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
                kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
                        __builtin_return_address(0), gva);
                return KVM_INVALID_PAGE;
        }

        gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
                        gva);
                return KVM_INVALID_PAGE;
        }
        kvm_mips_map_page(vcpu->kvm, gfn);
        return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int
kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
                        unsigned long entrylo0, unsigned long entrylo1,
                        int flush_dcache_mask)
{
        unsigned long flags;
        unsigned long old_entryhi;
        volatile int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx > current_cpu_data.tlbsize) {
                kvm_err("%s: Invalid Index: %d\n", __func__, idx);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        if (idx < 0) {
                idx = read_c0_random() % current_cpu_data.tlbsize;
                write_c0_index(idx);
                mtc0_tlbw_hazard();
        }

        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        tlbw_use_hazard();

#ifdef DEBUG
        if (debug) {
                kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
                          "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                          vcpu->arch.pc, idx, read_c0_entryhi(),
                          read_c0_entrylo0(), read_c0_entrylo1());
        }
#endif

        /* Flush D-cache */
        if (flush_dcache_mask) {
                if (entrylo0 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page((entryhi & VPN2_MASK) &
                                              ~flush_dcache_mask);
                }
                if (entrylo1 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page(((entryhi & VPN2_MASK) &
                                               ~flush_dcache_mask) |
                                              (0x1 << PAGE_SHIFT));
                }
        }

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);
        return 0;
}
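
/*
 * Handle a host TLB miss on a guest KSEG0 address: map the even/odd guest
 * page pair containing @badvaddr and write a corresponding entry into the
 * host TLB under the guest-kernel ASID.
 */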
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
                                    struct kvm_vcpu *vcpu)
{
        gfn_t gfn;
        pfn_t pfn0, pfn1;
        unsigned long vaddr = 0;
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        int even;
        struct kvm *kvm = vcpu->kvm;
        const int flush_dcache_mask = 0;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }
        even = !(gfn & 0x1);
        vaddr = badvaddr & (PAGE_MASK << 1);

        kvm_mips_map_page(vcpu->kvm, gfn);
        kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);

        if (even) {
                pfn0 = kvm->arch.guest_pmap[gfn];
                pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
        } else {
                pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
                pfn1 = kvm->arch.guest_pmap[gfn];
        }

        entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       flush_dcache_mask);
}
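
/*
 * Map the VCPU's commpage at @badvaddr by writing the host TLB entry at the
 * index reserved for the commpage. EntryLo1 is left invalid, so only the
 * even page of the pair is mapped.
 */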
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
                                       struct kvm_vcpu *vcpu)
{
        pfn_t pfn0, pfn1;
        unsigned long flags, old_entryhi = 0, vaddr = 0;
        unsigned long entrylo0 = 0, entrylo1 = 0;

        pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
        pfn1 = 0;
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);
        entrylo1 = 0;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        vaddr = badvaddr & (PAGE_MASK << 1);
        write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
        mtc0_tlbw_hazard();
        write_c0_entrylo0(entrylo0);
        mtc0_tlbw_hazard();
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        write_c0_index(kvm_mips_get_commpage_asid(vcpu));
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

#ifdef DEBUG
        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());
#endif

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);
        return 0;
}
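
/*
 * Install a host TLB entry for a guest mapped-segment access: translate the
 * guest TLB entry's output addresses to host pfns, preserve its D and V
 * bits, and optionally return the resulting host physical addresses via
 * @hpa0/@hpa1.
 */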
int
kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                     struct kvm_mips_tlb *tlb,
                                     unsigned long *hpa0, unsigned long *hpa1)
{
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        pfn_t pfn0, pfn1;

        if ((tlb->tlb_hi & VPN2_MASK) == 0) {
                pfn0 = 0;
                pfn1 = 0;
        } else {
                kvm_mips_map_page(kvm,
                                  mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
                kvm_mips_map_page(kvm,
                                  mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
                pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
                pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
        }

        if (hpa0)
                *hpa0 = pfn0 << PAGE_SHIFT;

        if (hpa1)
                *hpa1 = pfn1 << PAGE_SHIFT;

        /* Get attributes from the Guest TLB */
        entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
                   kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

#ifdef DEBUG
        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo0, tlb->tlb_lo1);
#endif

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       tlb->tlb_mask);
}
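
/*
 * Probe the guest (virtual) TLB array for an entry matching @entryhi
 * (VPN2 plus ASID, honouring the global bit). Returns the index or -1.
 */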
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
        int i;
        int index = -1;
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
                     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
                    (TLB_IS_GLOBAL(tlb[i]) ||
                     (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
                        index = i;
                        break;
                }
        }

#ifdef DEBUG
        kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                  __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
#endif

        return index;
}
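
/*
 * Probe the host TLB for @vaddr using the VCPU's current kernel or user
 * ASID. Returns the host TLB index, or a negative value on a miss.
 */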
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
        unsigned long old_entryhi, flags;
        volatile int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        if (KVM_GUEST_KERNEL_MODE(vcpu))
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_kernel_asid(vcpu));
        else {
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_user_asid(vcpu));
        }

        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

#ifdef DEBUG
        kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
#endif

        return idx;
}
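
/*
 * Probe the host TLB for @va under the VCPU's guest-user ASID and, if a
 * matching entry is found, invalidate it by overwriting it with a unique
 * non-matching EntryHi and cleared EntryLo values.
 */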
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
        int idx;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize)
                BUG();

        if (idx > 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();

                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();

                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

#ifdef DEBUG
        if (idx > 0) {
                kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
                          (va & VPN2_MASK) |
                          (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK),
                          idx);
        }
#endif

        return 0;
}

/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
        unsigned long flags, old_entryhi;

        if (index >= current_cpu_data.tlbsize)
                BUG();

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi(UNIQUE_ENTRYHI(index));
        mtc0_tlbw_hazard();

        write_c0_index(index);
        mtc0_tlbw_hazard();

        write_c0_entrylo0(0);
        mtc0_tlbw_hazard();

        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        return 0;
}
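
/*
 * Invalidate all host TLB entries. If @skip_kseg0 is set, entries whose
 * EntryHi falls in guest KSEG0 are left in place.
 */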
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
        unsigned long flags;
        unsigned long old_entryhi, entryhi;
        unsigned long old_pagemask;
        int entry = 0;
        int maxentry = current_cpu_data.tlbsize;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        /* Blast 'em all away. */
        for (entry = 0; entry < maxentry; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();

                if (skip_kseg0) {
                        tlb_read();
                        tlbw_use_hazard();

                        entryhi = read_c0_entryhi();

                        /* Don't blow away guest kernel entries */
                        if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
                                continue;
                        }
                }

                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                mtc0_tlbw_hazard();
                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);
}
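
/*
 * KVM variant of get_new_mmu_context(): assign a new ASID to @mm on @cpu,
 * flushing the local TLB (and a virtually tagged icache, if present) when
 * the ASID space rolls over.
 */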
void
kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
                        struct kvm_vcpu *vcpu)
{
        unsigned long asid = asid_cache(cpu);

        if (!(ASID_MASK(ASID_INC(asid)))) {
                if (cpu_has_vtag_icache) {
                        flush_icache_all();
                }

                kvm_local_flush_tlb_all();      /* start new asid cycle */

                if (!asid)      /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
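
/*
 * Snapshot the current CPU's host TLB into the VCPU's per-CPU shadow TLB
 * array so it can be restored later by kvm_shadow_tlb_load().
 */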
void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        unsigned long old_entryhi;
        unsigned long old_pagemask;
        int entry = 0;
        int cpu = smp_processor_id();

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_read();
                tlbw_use_hazard();

                vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
                vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
                vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
                vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
        }

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);
}
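
/* Reload the host TLB from the VCPU's per-CPU shadow TLB snapshot. */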
void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;
        int cpu = smp_processor_id();

        local_irq_save(flags);

        old_ctx = read_c0_entryhi();

        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
                mtc0_tlbw_hazard();
                write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
                write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);

                write_c0_index(entry);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                tlbw_use_hazard();
        }

        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        mtc0_tlbw_hazard();
        local_irq_restore(flags);
}
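
/* Invalidate every entry in the local CPU's TLB using unique EntryHi values. */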
void kvm_local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry = 0;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);
}
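
/* Initialise the VCPU's per-CPU shadow TLB arrays with unique, invalid entries. */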
void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
{
        int cpu, entry;

        for_each_possible_cpu(cpu) {
                for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
                            UNIQUE_ENTRYHI(entry);
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
                            read_c0_pagemask();
#ifdef DEBUG
                        kvm_debug("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
                                  cpu, entry,
                                  vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
                                  vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
                                  vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
#endif
                }
        }
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;
        int newasid = 0;

#ifdef DEBUG
        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
#endif

        /* Allocate new kernel and user ASIDs if needed */
        local_irq_save(flags);

        if (((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
             ASID_VERSION_MASK)) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                    vcpu->arch.guest_kernel_mm.context.asid[cpu];
                kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
                vcpu->arch.guest_user_asid[cpu] =
                    vcpu->arch.guest_user_mm.context.asid[cpu];
                newasid++;

                kvm_info("[%d]: cpu_context: %#lx\n", cpu,
                         cpu_context(cpu, current->mm));
                kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
                kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
                         vcpu->arch.guest_user_asid[cpu]);
        }

        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
                         vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
        }

        /* Only reload shadow host TLB if new ASIDs haven't been allocated */
#if 0
        if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
                kvm_mips_flush_host_tlb(0);
                kvm_shadow_tlb_load(vcpu);
        }
#endif

        if (!newasid) {
                /*
                 * If we were preempted while the guest was executing, reload
                 * the pre-empted ASID.
                 */
                if (current->flags & PF_VCPU) {
                        write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
                        ehb();
                }
        } else {
                /* New ASIDs were allocated for the VM */

                /*
                 * Were we in guest context? If so, the pre-empted ASID is no
                 * longer valid; set it to what it should be based on the
                 * mode of the Guest (Kernel/User).
                 */
                if (current->flags & PF_VCPU) {
                        if (KVM_GUEST_KERNEL_MODE(vcpu))
                                write_c0_entryhi(ASID_MASK(vcpu->arch.
                                                 guest_kernel_asid[cpu]));
                        else
                                write_c0_entryhi(ASID_MASK(vcpu->arch.
                                                 guest_user_asid[cpu]));
                        ehb();
                }
        }

        local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        uint32_t cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();

        vcpu->arch.preempt_entryhi = read_c0_entryhi();
        vcpu->arch.last_sched_cpu = cpu;

#if 0
        if ((atomic_read(&kvm_mips_instance) > 1)) {
                kvm_shadow_tlb_put(vcpu);
        }
#endif

        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             ASID_VERSION_MASK)) {
                kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
                          cpu_context(cpu, current->mm));
                drop_mmu_context(current->mm, cpu);
        }
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        ehb();

        local_irq_restore(flags);
}
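
/*
 * Fetch the guest instruction at @opc. For guest mapped addresses the host
 * TLB is probed (and refilled from the guest TLB on a miss) before the
 * fetch; guest KSEG0 addresses are translated to a host physical address
 * and read through CKSEG0. Returns KVM_INVALID_INST on failure.
 */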
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long paddr, flags;
        uint32_t inst;
        int index;

        if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
            KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
                local_irq_save(flags);
                index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
                if (index >= 0) {
                        inst = *(opc);
                } else {
                        index = kvm_mips_guest_tlb_lookup(vcpu,
                                        ((unsigned long) opc & VPN2_MASK) |
                                        ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
                        if (index < 0) {
                                kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
                                        __func__, opc, vcpu, read_c0_entryhi());
                                kvm_mips_dump_host_tlbs();
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
                        kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
                                                             &vcpu->arch.guest_tlb[index],
                                                             NULL, NULL);
                        inst = *(opc);
                }
                local_irq_restore(flags);
        } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
                paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
                                                              (unsigned long) opc);
                inst = *(uint32_t *) CKSEG0ADDR(paddr);
        } else {
                kvm_err("%s: illegal address: %p\n", __func__, opc);
                return KVM_INVALID_INST;
        }

        return inst;
}

EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_shadow_tlb_put);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_shadow_tlb_load);
EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);