/* book3s_hv_rm_mmu.c */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte(swapper_pg_dir, addr);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}
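
/*
 * Note: these handlers run in real mode (MMU off), where vmalloc'd
 * structures such as the revmap array cannot be dereferenced through
 * their vmalloc addresses.  real_vmalloc_addr() rewrites such a
 * pointer into the kernel linear mapping, which real-mode code can
 * use directly.
 */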
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
	int global;

	/*
	 * If there is only one vcore, and it's currently running,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * If we're not using MMU notifiers, we never take pages away
	 * from the guest, so we can use tlbiel if requested.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcore)
		global = 0;
	else if (kvm->arch.using_mmu_notifiers)
		global = 1;
	else
		global = !(flags & H_LOCAL);

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
				  &kvm->arch.need_tlb_flush);
	}
	return global;
}
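
/*
 * Background (architectural): tlbie broadcasts the invalidation to all
 * processors in the system, while tlbiel only affects the local core.
 * When we take the tlbiel shortcut above, need_tlb_flush records which
 * physical cpus may still hold stale translations for this LPID, so
 * they can be flushed before a vcpu of this guest next runs there.
 */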
/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
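
/*
 * Layout note: rev->forw and rev->back are HPT indices forming a
 * circular doubly-linked list of all HPTEs that map the same real
 * page.  The page's rmap word stores the index of the head entry in
 * KVMPPC_RMAP_INDEX plus the KVMPPC_RMAP_PRESENT flag, which is why
 * the empty case above simply points the new entry at itself.
 */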
/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	if (!memslot)
		return;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	unlock_rmap(rmap);
}
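
/*
 * The R (referenced) and C (changed) bits harvested from the HPTE are
 * folded into both rev->guest_rpte and the rmap word above, so page
 * aging and dirty tracking survive the removal of the mapping.
 */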
static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
			      int writing, unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
	if (!ptep)
		return __pte(0);
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;
	if (ps > *pte_sizep)
		return __pte(0);
	if (!pte_present(*ptep))
		return __pte(0);
	return kvmppc_read_update_linux_pte(ptep, writing);
}
static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
{
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = hpte_v;
}
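
/*
 * HPTE_V_HVLOCK is a software lock bit that the hypervisor keeps in
 * the first doubleword of each HPTE.  Writing hpte_v with the bit
 * clear releases the lock; the release barrier above orders all
 * prior updates (e.g. to hpte[1]) before the unlocking store.
 */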
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t pte;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* PPC970 can't do emulated MMIO */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return H_PARAMETER;
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = memslot->arch.slot_phys;
		if (!physp)
			return H_PARAMETER;
		physp += slot_fn;
		if (realmode)
			physp = real_vmalloc_addr(physp);
		pa = *physp;
		if (!pa)
			return H_TOO_HARD;
		is_io = pa & (HPTE_R_I | HPTE_R_W);
		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
		pa &= PAGE_MASK;
	} else {
		/* Translate to host virtual address */
		hva = __gfn_to_hva_memslot(memslot, gfn);

		/* Look up the Linux PTE for the backing page */
		pte_size = psize;
		pte = lookup_linux_pte(pgdir, hva, writing, &pte_size);
		if (pte_present(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
		}
	}

	if (pte_size < psize)
		return H_PARAMETER;
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* Check WIMG */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W | HPTE_R_I | HPTE_R_G);
		ptel |= HPTE_R_M;
	}
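	/*
	 * Geometry note: each HPTE is two doublewords (16 bytes), so
	 * (pte_index << 4) below is a byte offset into the hashed page
	 * table, and a PTE group holds 8 consecutive HPTEs.  Without
	 * H_EXACT we may install the mapping in any free slot of the
	 * group containing pte_index.
	 */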
	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (kvm->arch.using_mmu_notifiers &&
		    mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	hpte[1] = ptel;

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}
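
/*
 * Hcall convention note: output values are returned to the guest in
 * GPRs starting at r4, which is why the chosen pte_index is stored
 * through &vcpu->arch.gpr[4] above.  For reference, a PAPR guest
 * issues this hcall roughly as follows (illustrative sketch using the
 * pseries plpar wrappers; not part of this file):
 *
 *	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 *	long rc = plpar_hcall(H_ENTER, retbuf, flags, pte_index,
 *			      hpte_v, hpte_r);
 *	if (rc == H_SUCCESS)
 *		slot = retbuf[0];	// index actually used
 */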
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}
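
/*
 * try_lock_tlbie() is a test-and-set built on the load-reserve/
 * store-conditional pair: lwarx reads the lock word; if it is already
 * nonzero the routine fails immediately, otherwise stwcx. stores this
 * cpu's lock token, retrying from lwarx on reservation loss.  isync
 * provides the acquire barrier.  Callers spin with cpu_relax() around
 * it, and unlock by simply storing 0 to the lock word.
 */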
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	unsigned long *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	v = hpte[0] & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~HPTE_V_VALID;
		rb = compute_tlbie_rb(v, hpte[1], pte_index);
		if (global_invalidates(kvm, flags)) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");
		}
		/* Read PTE low word after tlbie to get final R/C values */
		remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, *hptes[4], tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	long int local = 0;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvm->arch.hpt_npte) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (unsigned long *)
				(kvm->arch.hpt_virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp[0] & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp[0] & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~HPTE_V_HVLOCK;
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp[0] & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				continue;
			}

			hp[0] &= ~HPTE_V_VALID;		/* leave it locked */
			tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		if (!local) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			for (k = 0; k < n; ++k)
				asm volatile(PPC_TLBIE(%1,%0) : :
					     "r" (tlbrb[k]),
					     "r" (kvm->arch.lpid));
			asm volatile("eieio; tlbsync; ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			for (k = 0; k < n; ++k)
				asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
			asm volatile("ptesync" : : : "memory");
		}

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			hp[0] = 0;
		}
	}

	return ret;
}
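
/*
 * H_BULK_REMOVE packs up to four requests into r4-r11, two registers
 * per request: a control word (request type and flags in the top
 * byte, pte_index in the low 56 bits) and an AVPN/andcond value.  The
 * code above rewrites each control word's top byte with the outcome
 * (0x80 success, 0x90 not found, 0xa0 parameter error) plus the
 * harvested R/C bits, and batches the TLB invalidations so the tlbie
 * lock is taken once per group of up to four entries.
 */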
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
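	/*
	 * The H_PROTECT flags argument carries the new pp0/pp/n/key
	 * protection bits in the positions PAPR defines for the hcall;
	 * the shifts above move them into their slots in the second
	 * HPTE doubleword (pp0 up to bit 63, the high key bits into
	 * the HPTE_R_KEY_HI field).
	 */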
	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		rb = compute_tlbie_rb(v, r, pte_index);
		hpte[0] = v & ~HPTE_V_VALID;
		if (global_invalidates(kvm, flags)) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");
		}
		/*
		 * If the host has this page as readonly but the guest
		 * wants to make it read/write, reduce the permissions.
		 * Checking the host permissions involves finding the
		 * memslot and then the Linux PTE for the page.
		 */
		if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
			unsigned long psize, gfn, hva;
			struct kvm_memory_slot *memslot;
			pgd_t *pgdir = vcpu->arch.pgdir;
			pte_t pte;

			psize = hpte_page_size(v, r);
			gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
			memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
			if (memslot) {
				hva = __gfn_to_hva_memslot(memslot, gfn);
				pte = lookup_linux_pte(pgdir, hva, 1, &psize);
				if (pte_present(pte) && !pte_write(pte))
					r = hpte_make_readonly(r);
			}
		}
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = hpte[0] & ~HPTE_V_HVLOCK;
		r = hpte[1];
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}
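
/*
 * HPTE_V_ABSENT is purely a host-side marker for entries whose page
 * is paged out or is emulated MMIO; from the guest's point of view
 * such entries are still present, so H_READ reports them as VALID
 * and substitutes the guest's saved view of the second doubleword
 * from the revmap entry.
 */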
void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			    unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~HPTE_V_VALID;
	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
		cpu_relax();
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
		     : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
			   unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;

	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
		cpu_relax();
	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
		     : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
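
/*
 * Clearing the referenced bit with a single byte store (byte 14 of
 * the 16-byte HPTE holds the R bit in this big-endian layout) avoids
 * taking the HPTE lock and cannot disturb the C bit, which lives in
 * a different byte.  The HPTE stays valid; the tlbie just forces the
 * hardware to set R again on the next access.
 */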
static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};
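
/*
 * Indexed by the SLB_VSID_LP field of an SLB entry when SLB_VSID_L
 * (large page) is set; see kvmppc_hv_find_lock_hpte() below.  The
 * 1M entry is present only to keep the encoding table complete.
 */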
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	unsigned long *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;
	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = hpte[i] & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = hpte[i] & ~HPTE_V_HVLOCK;
			r = hpte[i+1];

			/*
			 * Check the HPTE again, including large page size
			 * Since we don't currently allow any MPSS (mixed
			 * page-size segment) page sizes, it is sufficient
			 * to check against the actual page size.
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			/* Unlock and move on */
			hpte[i] = v;
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvm->arch.hpt_mask;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
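
/*
 * Search note: each PTE group is 128 bytes (hence hash << 7) holding
 * 8 HPTEs, scanned two doublewords at a time.  If no match is found
 * in the primary group, the loop retries the secondary hash
 * (hash ^ hpt_mask) with HPTE_V_SECONDARY set in the match value.
 * The value returned, (hash << 3) + (i >> 1), is the global HPT index
 * of the matching entry, which is left locked for the caller.
 */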
/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if not
 * (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	unsigned long *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE)
		valid |= HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0) {
		if (status & DSISR_NOHPTE)
			return status;	/* there really was no HPTE */
		return 0;		/* for prot fault, HPTE disappeared */
	}
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hpte[0] & ~HPTE_V_HVLOCK;
	r = hpte[1];
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	unlock_hpte(hpte, v);

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}
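
/*
 * The -2 (MMIO emulation) case above relies on the convention set up
 * in kvmppc_do_h_enter(): emulated MMIO pages are entered with both
 * storage key bits set (key = 31), which this code reserves to mark
 * such pages, so finding HPTE_R_KEY_HI | HPTE_R_KEY_LO here
 * identifies the access as a candidate for MMIO emulation.
 */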