book3s_64_mmu_hv.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63
#define NR_LPIDS	(LPID_RSVD + 1)
unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];

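/*
 * Allocate a hashed page table (HPT), the matching reverse-map array
 * and a fresh LPID for a new guest.  Returns 0 or -ENOMEM.
 */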
long kvmppc_alloc_hpt(struct kvm *kvm)
{
	unsigned long hpt;
	unsigned long lpid;
	struct revmap_entry *rev;

	/* Allocate guest's hashed page table */
	hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
			       HPT_ORDER - PAGE_SHIFT);
	if (!hpt) {
		pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
		return -ENOMEM;
	}
	kvm->arch.hpt_virt = hpt;

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
	}
	kvm->arch.revmap = rev;

	/* Allocate the guest's logical partition ID */
	do {
		lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
		if (lpid >= NR_LPIDS) {
			pr_err("kvm_alloc_hpt: No LPIDs free\n");
			goto out_freeboth;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));
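
	/*
	 * The low bits of SDR1 hold HTABSIZE, which is
	 * log2(HPT size in bytes) - 18, since the architected
	 * minimum HPT size is 2^18 bytes (256kB).
	 */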
	kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
	kvm->arch.lpid = lpid;

	pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
	return 0;

 out_freeboth:
	vfree(rev);
 out_freehpt:
	free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
	return -ENOMEM;
}

void kvmppc_free_hpt(struct kvm *kvm)
{
	clear_bit(kvm->arch.lpid, lpid_inuse);
	vfree(kvm->arch.revmap);
	free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}

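/*
 * Note: HPTE_V_LARGE above marks any page larger than 4k, and the
 * 0x1000 in the second dword is the LP encoding for a 64k page;
 * 16M pages need no extra LP bits, which is why only these three
 * sizes are handled here.
 */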
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	long ret;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > HPT_NPTEG)
		npages = HPT_NPTEG;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
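		/*
		 * For a 1TB segment the architected hash is
		 * vsid ^ (vsid << 25) ^ (page index); folding in
		 * VRMA_VSID this way gives the same result without
		 * needing a wider-than-64-bit virtual address.
		 */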
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	memset(lpid_inuse, 0, sizeof(lpid_inuse));

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	set_bit(host_lpid, lpid_inuse);
	/* rsvd_lpid is reserved for use in partition switching */
	set_bit(rsvd_lpid, lpid_inuse);

	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

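/*
 * Interrupt delivery resets the guest MSR to 64-bit mode with machine
 * checks enabled and address translation off, which is the state a
 * Book3S interrupt vector runs in.
 */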
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}

/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the kvm->arch.slot_phys[][] arrays.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
				  struct kvm_memory_slot *memslot,
				  unsigned long psize)
{
	unsigned long start;
	long np, err;
	struct page *page, *hpage, *pages[1];
	unsigned long s, pgsize;
	unsigned long *physp;
	unsigned int is_io, got, pgorder;
	struct vm_area_struct *vma;
	unsigned long pfn, i, npages;

	physp = kvm->arch.slot_phys[memslot->id];
	if (!physp)
		return -EINVAL;
	if (physp[gfn - memslot->base_gfn])
		return 0;

	is_io = 0;
	got = 0;
	page = NULL;
	pgsize = psize;
	err = -EINVAL;
	start = gfn_to_hva_memslot(memslot, gfn);

	/* Instantiate and get the page we want access to */
	np = get_user_pages_fast(start, 1, 1, pages);
	if (np != 1) {
		/* Look up the vma for the page */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || vma->vm_start > start ||
		    start + psize > vma->vm_end ||
		    !(vma->vm_flags & VM_PFNMAP))
			goto up_err;
		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		/* check alignment of pfn vs. requested page size */
		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
			goto up_err;
		up_read(&current->mm->mmap_sem);

	} else {
		page = pages[0];
		got = KVMPPC_GOT_PAGE;

		/* See if this is a large page */
		s = PAGE_SIZE;
		if (PageHuge(page)) {
			hpage = compound_head(page);
			s <<= compound_order(hpage);
			/* Get the whole large page if slot alignment is ok */
			if (s > psize && slot_is_aligned(memslot, s) &&
			    !(memslot->userspace_addr & (s - 1))) {
				start &= ~(s - 1);
				pgsize = s;
				page = hpage;
			}
		}
		if (s < psize)
			goto out;
		pfn = page_to_pfn(page);
	}

	npages = pgsize >> PAGE_SHIFT;
	pgorder = __ilog2(npages);
	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
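
	/*
	 * A slot_phys entry is the page's real address with the page
	 * order, the cache bits from the PTE and a KVMPPC_GOT_PAGE
	 * reference flag packed into the low-order bits, which are
	 * free because the address is page-aligned.
	 */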
	spin_lock(&kvm->arch.slot_phys_lock);
	for (i = 0; i < npages; ++i) {
		if (!physp[i]) {
			physp[i] = ((pfn + i) << PAGE_SHIFT) +
				got + is_io + pgorder;
			got = 0;
		}
	}
	spin_unlock(&kvm->arch.slot_phys_lock);
	err = 0;

 out:
	if (got) {
		if (PageHuge(page))
			page = compound_head(page);
		put_page(page);
	}
	return err;

 up_err:
	up_read(&current->mm->mmap_sem);
	return err;
}

/*
 * We come here on a H_ENTER call from the guest when
 * we don't have the requested page pinned already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			     long pte_index, unsigned long pteh,
			     unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long psize, gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (!slot_is_aligned(memslot, psize))
			return H_PARAMETER;
		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
			return H_PARAMETER;
	}
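
	/*
	 * kvmppc_h_enter is the real-mode H_ENTER handler; it busy-waits
	 * on HPTE locks, so don't let this task be preempted while
	 * calling it from virtual mode.
	 */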
	preempt_disable();
	ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
	preempt_enable();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;
}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
					      unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
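	/*
	 * e.g. for a 64k page ra_mask is 0xffff, so the low 16 bits
	 * of the guest real address come from the effective address.
	 */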
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
					 struct kvmppc_pte *gpte, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, gr;
	unsigned long *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0)
		return -ENOENT;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0] & ~HPTE_V_HVLOCK;
	gr = kvm->arch.revmap[index].guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hptep[0] = v;

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;
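
	/*
	 * key now holds the SLBE Kp bit for problem state or Ks for
	 * privileged state; together with the PP bits it selects the
	 * architected page-protection level.
	 */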
	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
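	/*
	 * e.g. lwz (opcode 32, 0x80000000) fails the 0x10000000 test
	 * while stw (opcode 36, 0x90000000) passes it; for opcode 31,
	 * lwzx (31,23) has bit 0x100 of the extended opcode clear and
	 * stwx (31,151) has it set.
	 */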
	return (instr & mask) != 0;
}

static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned long gpa, int is_store)
{
	int ret;
	u32 last_inst;
	unsigned long srr0 = kvmppc_get_pc(vcpu);

	/*
	 * We try to load the last instruction.  We don't let
	 * emulate_instruction do it as it doesn't check what
	 * kvmppc_ld returns.
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
			return RESUME_GUEST;
		vcpu->arch.last_inst = last_inst;
	}

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */
	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible.  It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */
	vcpu->arch.paddr_accessed = gpa;
	return kvmppc_emulate_mmio(run, vcpu);
}

int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hptep, hpte[3];
	unsigned long psize;
	unsigned long gfn;
	struct kvm_memory_slot *memslot;
	struct revmap_entry *rev;
	long index;

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;
	index = vcpu->arch.pgfault_index;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = &kvm->arch.revmap[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
	hpte[1] = hptep[1];
	hpte[2] = rev->guest_rpte;
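	/*
	 * Unlock the HPTE: the lwsync orders the reads above before
	 * the store that clears HPTE_V_HVLOCK.
	 */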
	asm volatile("lwsync" : : : "memory");
	hptep[0] = hpte[0];
	preempt_enable();

	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], hpte[1]);
	gfn = hpte_rpn(hpte[2], psize);
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
					      dsisr & DSISR_ISSTORE);
	}

	/* should never get here otherwise */
	return -EFAULT;
}

void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page;
	unsigned long psize, offset;
	unsigned long pa;
	unsigned long *physp;

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return NULL;
	physp = kvm->arch.slot_phys[memslot->id];
	if (!physp)
		return NULL;
	physp += gfn - memslot->base_gfn;
	pa = *physp;
	if (!pa) {
		if (kvmppc_get_guest_page(kvm, gfn, memslot, PAGE_SIZE) < 0)
			return NULL;
		pa = *physp;
	}
	page = pfn_to_page(pa >> PAGE_SHIFT);
	psize = PAGE_SIZE;
	if (PageHuge(page)) {
		page = compound_head(page);
		psize <<= compound_order(page);
	}
	get_page(page);
	offset = gpa & (psize - 1);
	if (nb_ret)
		*nb_ret = psize - offset;
	return page_address(page) + offset;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
	struct page *page = virt_to_page(va);

	page = compound_head(page);
	put_page(page);
}

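/*
 * Illustrative use of the pin/unpin pair (a sketch, not a real caller;
 * actual users are hypercall handlers that need to touch guest memory
 * from the kernel):
 *
 *	unsigned long nb;
 *	void *va = kvmppc_pin_guest_page(kvm, gpa, &nb);
 *
 *	if (va && nb >= len) {
 *		memcpy(buf, va, len);	// access guest memory directly
 *		kvmppc_unpin_guest_page(kvm, va);
 *	}
 */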
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}