book3s_64_mmu_hv.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63

/* Power architecture requires HPT is at least 256kB */
#define PPC_MIN_HPT_ORDER	18
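
/*
 * Allocate the guest's hashed page table (HPT) and the associated
 * reverse-map (revmap) array, recording the result in kvm->arch.
 * On success the allocation order actually used is passed back
 * through *htab_orderp, if that pointer is non-NULL.
 */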
long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	unsigned long hpt;
	struct revmap_entry *rev;
	struct kvmppc_linear_info *li;
	long order = kvm_hpt_order;

	if (htab_orderp) {
		order = *htab_orderp;
		if (order < PPC_MIN_HPT_ORDER)
			order = PPC_MIN_HPT_ORDER;
	}

	/*
	 * If the user wants a different size from default,
	 * try first to allocate it from the kernel page allocator.
	 */
	hpt = 0;
	if (order != kvm_hpt_order) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	/* Next try to allocate from the preallocated pool */
	if (!hpt) {
		li = kvm_alloc_hpt();
		if (li) {
			hpt = (ulong)li->base_virt;
			kvm->arch.hpt_li = li;
			order = kvm_hpt_order;
		}
	}

	/* Lastly try successively smaller sizes from the page allocator */
	while (!hpt && order > PPC_MIN_HPT_ORDER) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	if (!hpt)
		return -ENOMEM;

	kvm->arch.hpt_virt = hpt;
	kvm->arch.hpt_order = order;
	/* HPTEs are 2**4 bytes long */
	kvm->arch.hpt_npte = 1ul << (order - 4);
	/* 128 (2**7) bytes in each HPTEG */
	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
	}
	kvm->arch.revmap = rev;
	kvm->arch.sdr1 = __pa(hpt) | (order - 18);

	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
		hpt, order, kvm->arch.lpid);

	if (htab_orderp)
		*htab_orderp = order;
	return 0;

 out_freehpt:
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(hpt, order - PAGE_SHIFT);
	return -ENOMEM;
}
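
/*
 * Clear the existing HPT so it can be reused after a guest reset, or
 * allocate a new one if the guest has none yet.  Returns -EBUSY if
 * vcpus are found to be running after RMA setup has completed.
 */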
long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	long err = -EBUSY;
	long order;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done) {
		kvm->arch.rma_setup_done = 0;
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			goto out;
		}
	}
	if (kvm->arch.hpt_virt) {
		order = kvm->arch.hpt_order;
		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
		/*
		 * Set the whole last_vcpu array to an invalid vcpu number.
		 * This ensures that each vcpu will flush its TLB on next entry.
		 */
		memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu));
		*htab_orderp = order;
		err = 0;
	} else {
		err = kvmppc_alloc_hpt(kvm, htab_orderp);
		order = *htab_orderp;
	}
 out:
	mutex_unlock(&kvm->lock);
	return err;
}
void kvmppc_free_hpt(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);
	vfree(kvm->arch.revmap);
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(kvm->arch.hpt_virt,
			   kvm->arch.hpt_order - PAGE_SHIFT);
}
/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}
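
/*
 * Pre-populate the HPT with bolted entries mapping the VRMA (virtual
 * real mode area), so the guest has a usable real-mode mapping before
 * it runs.  One HPTE is created per VRMA page, using entry 7 of each
 * HPTEG.
 */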
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvm->arch.hpt_mask + 1)
		npages = kvm->arch.hpt_mask + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}
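
/*
 * One-time MMU setup for HV KVM: tell the LPID allocator how many
 * LPIDs the hardware provides, and claim both the host's LPID and the
 * LPID value reserved for partition switching.
 */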
int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}
/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the kvm->arch.slot_phys[][] arrays.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
				  struct kvm_memory_slot *memslot,
				  unsigned long psize)
{
	unsigned long start;
	long np, err;
	struct page *page, *hpage, *pages[1];
	unsigned long s, pgsize;
	unsigned long *physp;
	unsigned int is_io, got, pgorder;
	struct vm_area_struct *vma;
	unsigned long pfn, i, npages;

	physp = kvm->arch.slot_phys[memslot->id];
	if (!physp)
		return -EINVAL;
	if (physp[gfn - memslot->base_gfn])
		return 0;

	is_io = 0;
	got = 0;
	page = NULL;
	pgsize = psize;
	err = -EINVAL;
	start = gfn_to_hva_memslot(memslot, gfn);

	/* Instantiate and get the page we want access to */
	np = get_user_pages_fast(start, 1, 1, pages);
	if (np != 1) {
		/* Look up the vma for the page */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || vma->vm_start > start ||
		    start + psize > vma->vm_end ||
		    !(vma->vm_flags & VM_PFNMAP))
			goto up_err;
		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		/* check alignment of pfn vs. requested page size */
		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
			goto up_err;
		up_read(&current->mm->mmap_sem);

	} else {
		page = pages[0];
		got = KVMPPC_GOT_PAGE;

		/* See if this is a large page */
		s = PAGE_SIZE;
		if (PageHuge(page)) {
			hpage = compound_head(page);
			s <<= compound_order(hpage);
			/* Get the whole large page if slot alignment is ok */
			if (s > psize && slot_is_aligned(memslot, s) &&
			    !(memslot->userspace_addr & (s - 1))) {
				start &= ~(s - 1);
				pgsize = s;
				get_page(hpage);
				put_page(page);
				page = hpage;
			}
		}
		if (s < psize)
			goto out;
		pfn = page_to_pfn(page);
	}

	npages = pgsize >> PAGE_SHIFT;
	pgorder = __ilog2(npages);
	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
	spin_lock(&kvm->arch.slot_phys_lock);
	for (i = 0; i < npages; ++i) {
		if (!physp[i]) {
			physp[i] = ((pfn + i) << PAGE_SHIFT) +
				got + is_io + pgorder;
			got = 0;
		}
	}
	spin_unlock(&kvm->arch.slot_phys_lock);
	err = 0;

 out:
	if (got)
		put_page(page);
	return err;

 up_err:
	up_read(&current->mm->mmap_sem);
	return err;
}
/*
 * We come here on a H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long psize, gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;

	if (kvm->arch.using_mmu_notifiers)
		goto do_insert;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (!slot_is_aligned(memslot, psize))
			return H_PARAMETER;
		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
			return H_PARAMETER;
	}

 do_insert:
	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	vcpu->arch.pgdir = current->mm->pgd;
	ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;
}
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}
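
/*
 * Translate a guest effective address to a guest real address, for
 * instruction emulation and debugging: look up the guest SLB (when in
 * virtual mode), find and briefly lock the matching HPTE, and fill in
 * a kvmppc_pte with the real address and access permissions.
 */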
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, gr;
	unsigned long *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0)
		return -ENOENT;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0] & ~HPTE_V_HVLOCK;
	gr = kvm->arch.revmap[index].guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hptep[0] = v;

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}
/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}
static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned long gpa, gva_t ea, int is_store)
{
	int ret;
	u32 last_inst;
	unsigned long srr0 = kvmppc_get_pc(vcpu);

	/* We try to load the last instruction.  We don't let
	 * emulate_instruction do it as it doesn't check what
	 * kvmppc_ld returns.
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
			return RESUME_GUEST;
		vcpu->arch.last_inst = last_inst;
	}

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */
	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible.  It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}
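
/*
 * Handle a guest page fault that the real-mode HPT-miss handler has
 * punted to virtual mode: re-check the faulting HPTE, resolve the
 * guest real address to a host page (or an I/O mapping) and update the
 * HPTE to point at it, or hand the access to MMIO emulation if there
 * is no memslot backing it.
 */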
int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hptep, hpte[3], r;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	unsigned long is_io;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;
	index = vcpu->arch.pgfault_index;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = &kvm->arch.revmap[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
	hpte[1] = hptep[1];
	hpte[2] = r = rev->guest_rpte;
	asm volatile("lwsync" : : : "memory");
	hptep[0] = hpte[0];
	preempt_enable();

	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gfn = hpte_rpn(r, psize);
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);
	}

	if (!kvm->arch.using_mmu_notifiers)
		return -EFAULT;		/* should never get here */

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	is_io = 0;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			pte_t *ptep, pte;

			/*
			 * We need to protect against page table destruction
			 * while looking up and updating the pte.
			 */
			rcu_read_lock_sched();
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, NULL);
			if (ptep && pte_present(*ptep)) {
				pte = kvmppc_read_update_linux_pte(ptep, 1);
				if (pte_write(pte))
					write_ok = 1;
			}
			rcu_read_unlock_sched();
		}
		pfn = page_to_pfn(page);
	}

	ret = -EFAULT;
	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_io)) {
		if (is_io)
			return -EFAULT;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/* Set the HPTE to point to pfn */
	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (hptep[0] & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= HPTE_V_ABSENT;
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	hptep[1] = r;
	eieio();
	hptep[0] = hpte[0];
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

 out_put:
	if (page) {
		/*
		 * We drop pages[0] here, not page because page might
		 * have been set to the head page of a compound, but
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup()
		 */
		put_page(pages[0]);
	}
	return ret;

 out_unlock:
	hptep[0] &= ~HPTE_V_HVLOCK;
	preempt_enable();
	goto out_put;
}
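
/*
 * Call 'handler' on the rmap chain of every guest page that backs a
 * host virtual address in [start, end), across all memslots that
 * overlap the range.  Used to implement the MMU notifier callbacks.
 */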
static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				int (*handler)(struct kvm *kvm,
					       unsigned long *rmapp,
					       unsigned long gfn))
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gfn_t gfn_offset = gfn - memslot->base_gfn;

			ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long gfn))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}
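
/*
 * Remove every HPTE on the rmap chain for this guest page: unlink each
 * entry from the chain, invalidate it if it maps the page and is
 * valid, and harvest its referenced (R) and changed (C) bits into the
 * rmap word and the shadowed guest_rpte.
 */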
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long h, i, j;
	unsigned long *hptep;
	unsigned long ptel, psize, rcbits;

	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}
		j = rev[i].forw;
		if (j == i) {
			/* chain is now empty */
			*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		} else {
			/* remove i from chain */
			h = rev[i].back;
			rev[h].forw = j;
			rev[j].back = h;
			rev[i].forw = rev[i].back = i;
			*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
		}

		/* Now check and modify the HPTE */
		ptel = rev[i].guest_rpte;
		psize = hpte_page_size(hptep[0], ptel);
		if ((hptep[0] & HPTE_V_VALID) &&
		    hpte_rpn(ptel, psize) == gfn) {
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			/* Harvest R and C */
			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
			rev[i].guest_rpte = ptel | rcbits;
		}
		unlock_rmap(rmapp);
		hptep[0] &= ~HPTE_V_HVLOCK;
	}
	return 0;
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
	return 0;
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(hptep[1] & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			rev[i].guest_rpte |= HPTE_R_R;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;

	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
			j = rev[i].forw;
			if (hp[1] & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.using_mmu_notifiers)
		return;
	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}
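
/*
 * Test and clear the dirty state of one guest page: report whether the
 * rmap's changed bit or any mapping HPTE's C bit was set, clearing
 * them as we go so the next call only sees new writes.
 */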
static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		*rmapp &= ~KVMPPC_RMAP_CHANGED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		if (!(hptep[1] & HPTE_R_C))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
			/* need to make it temporarily absent to clear C */
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			hptep[1] &= ~HPTE_R_C;
			eieio();
			hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
			rev[i].guest_rpte |= HPTE_R_C;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	unsigned long i;
	unsigned long *rmapp, *map;

	preempt_disable();
	rmapp = memslot->arch.rmap;
	map = memslot->dirty_bitmap;
	for (i = 0; i < memslot->npages; ++i) {
		if (kvm_test_clear_dirty(kvm, rmapp))
			__set_bit_le(i, map);
		++rmapp;
	}
	preempt_enable();
	return 0;
}
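
/*
 * Pin the guest page containing guest physical address 'gpa' and
 * return a kernel-mapped pointer to it; if nb_ret is non-NULL, *nb_ret
 * is set to the number of bytes from that offset to the end of the
 * (possibly huge) page.  Release with kvmppc_unpin_guest_page().
 */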
void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, psize, offset;
	unsigned long pa;
	unsigned long *physp;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto err;
	if (!kvm->arch.using_mmu_notifiers) {
		physp = kvm->arch.slot_phys[memslot->id];
		if (!physp)
			goto err;
		physp += gfn - memslot->base_gfn;
		pa = *physp;
		if (!pa) {
			if (kvmppc_get_guest_page(kvm, gfn, memslot,
						  PAGE_SIZE) < 0)
				goto err;
			pa = *physp;
		}
		page = pfn_to_page(pa >> PAGE_SHIFT);
		get_page(page);
	} else {
		hva = gfn_to_hva_memslot(memslot, gfn);
		npages = get_user_pages_fast(hva, 1, 1, pages);
		if (npages < 1)
			goto err;
		page = pages[0];
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	psize = PAGE_SIZE;
	if (PageHuge(page)) {
		page = compound_head(page);
		psize <<= compound_order(page);
	}
	offset = gpa & (psize - 1);
	if (nb_ret)
		*nb_ret = psize - offset;
	return page_address(page) + offset;

 err:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return NULL;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
	struct page *page = virt_to_page(va);

	put_page(page);
}

void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}