book3s_64_mmu.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while (0)
#endif
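
/*
 * On MSR reset (e.g. interrupt delivery) the only bit a Book3S-64
 * guest keeps is MSR_SF, so the vcpu stays in 64-bit mode.
 */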
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF);
}
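
/*
 * Walk the guest SLB and return the entry whose ESID matches the
 * effective address, honouring 256MB vs. 1TB segment sizes.  Returns
 * NULL (after dumping the SLB when DEBUG_MMU is set) if no valid
 * entry covers eaddr.
 */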
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
				struct kvm_vcpu *vcpu,
				gva_t eaddr)
{
	int i;
	u64 esid = GET_ESID(eaddr);
	u64 esid_1t = GET_ESID_1T(eaddr);

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		u64 cmp_esid = esid;

		if (!vcpu->arch.slb[i].valid)
			continue;

		if (vcpu->arch.slb[i].tb)
			cmp_esid = esid_1t;

		if (vcpu->arch.slb[i].esid == cmp_esid)
			return &vcpu->arch.slb[i];
	}

	dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
		eaddr, esid, esid_1t);
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (vcpu->arch.slb[i].vsid)
			dprintk("  %d: %c%c%c %llx %llx\n", i,
				vcpu->arch.slb[i].valid ? 'v' : ' ',
				vcpu->arch.slb[i].large ? 'l' : ' ',
				vcpu->arch.slb[i].tb ? 't' : ' ',
				vcpu->arch.slb[i].esid,
				vcpu->arch.slb[i].vsid);
	}

	return NULL;
}
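
/*
 * Segment geometry helpers: the ESID/offset split sits at bit 28
 * (SID_SHIFT) for 256MB segments and bit 40 (SID_SHIFT_1T) for 1TB
 * segments; a virtual page number (VPN) is the segment offset shifted
 * down by VPN_SHIFT with the VSID packed in above it.
 */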
static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
{
	return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
}

static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
{
	return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
}

static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
{
	eaddr &= kvmppc_slb_offset_mask(slb);

	return (eaddr >> VPN_SHIFT) |
		((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
}

static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
{
	struct kvmppc_slb *slb;

	slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slb)
		return 0;

	return kvmppc_slb_calc_vpn(slb, eaddr);
}
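
/*
 * Page-size shifts: 64kB pages use a 16-bit page offset, 16MB pages a
 * 24-bit offset, and everything else falls back to 4kB (12 bits).
 */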
static int mmu_pagesize(int mmu_pg)
{
	switch (mmu_pg) {
	case MMU_PAGE_64K:
		return 16;
	case MMU_PAGE_16M:
		return 24;
	}
	return 12;
}

static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
	return mmu_pagesize(slbe->base_page_size);
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);

	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}
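
/*
 * Locate the guest hashed page table group (PTEG) for eaddr: hash the
 * VPN (inverted for the secondary group), mask with the HTAB size
 * from SDR1 (2^(11 + HTABSIZE) PTEGs) and shift by 7 since each PTEG
 * is 128 bytes (8 HPTEs of 16 bytes each), then add the HTAB origin
 * from the upper SDR1 bits.  The result is converted to a host
 * virtual address so the PTEG can be read with copy_from_user().
 */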
static hva_t kvmppc_mmu_book3s_64_get_pteg(
				struct kvmppc_vcpu_book3s *vcpu_book3s,
				struct kvmppc_slb *slbe, gva_t eaddr,
				bool second)
{
	u64 hash, pteg, htabsize;
	u32 ssize;
	hva_t r;
	u64 vpn;

	htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

	vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
	ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
	hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
	if (second)
		hash = ~hash;
	hash &= ((1ULL << 39ULL) - 1ULL);
	hash &= htabsize;
	hash <<= 7ULL;

	pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg |= hash;

	dprintk("MMU: eaddr=0x%lx sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
		eaddr, vcpu_book3s->sdr1, pteg, slbe->vsid);

	/* When running a PAPR guest, SDR1 contains an HVA address
	 * instead of a GPA */
	if (vcpu_book3s->vcpu.arch.papr_enabled)
		r = pteg;
	else
		r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);

	if (kvm_is_error_hva(r))
		return r;
	return r | (pteg & ~PAGE_MASK);
}
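
/*
 * Build the AVPN (abbreviated virtual page number) match value for
 * the first doubleword of an HPTE.  The VSID and page number are
 * packed together and then shifted into the position expected by the
 * HPTE_V_AVPN field, which drops the low-order virtual address bits.
 */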
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	u64 avpn;

	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

	if (p < 16)
		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
	else
		avpn <<= p - 16;

	return avpn;
}

/*
 * Return page size encoded in the second word of a HPTE, or
 * -1 for an invalid encoding for the base page size indicated by
 * the SLB entry.  This doesn't handle mixed pagesize segments yet.
 */
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
{
	switch (slbe->base_page_size) {
	case MMU_PAGE_64K:
		if ((r & 0xf000) == 0x1000)
			return MMU_PAGE_64K;
		break;
	case MMU_PAGE_16M:
		if ((r & 0xff000) == 0)
			return MMU_PAGE_16M;
		break;
	}
	return -1;
}
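
/*
 * Translate an effective address through the guest HPT: handle the
 * magic page shortcut, find the SLB entry, compute the AVPN match
 * value, then search the primary and (if needed) secondary PTEG for a
 * matching HPTE.  On a hit, fill in *gpte with the real address and
 * the permissions derived from the PP bits and the Ks/Kp keys, and
 * write the R/C reference bits back into the guest PTEG.
 */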
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, bool data)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_slb *slbe;
	hva_t ptegp;
	u64 pteg[16];
	u64 avpn = 0;
	u64 v, r;
	u64 v_val, v_mask;
	u64 eaddr_mask;
	int i;
	u8 pp, key = 0;
	bool found = false;
	bool second = false;
	int pgsize;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	/* Magic page override */
	if (unlikely(mp_ea) &&
	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
	    !(vcpu->arch.shared->msr & MSR_PR)) {
		gpte->eaddr = eaddr;
		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
		gpte->raddr = vcpu->arch.magic_page_pa | (eaddr & 0xfff);
		gpte->raddr &= KVM_PAM;
		gpte->may_execute = true;
		gpte->may_read = true;
		gpte->may_write = true;
		gpte->page_size = MMU_PAGE_4K;

		return 0;
	}

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slbe)
		goto no_seg_found;

	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
	v_val = avpn & HPTE_V_AVPN;

	if (slbe->tb)
		v_val |= SLB_VSID_B_1T;
	if (slbe->large)
		v_val |= HPTE_V_LARGE;
	v_val |= HPTE_V_VALID;

	v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
		HPTE_V_SECONDARY;

	pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;

do_second:
	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
	if (kvm_is_error_hva(ptegp))
		goto no_page_found;

	if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
		printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
		goto no_page_found;
	}

	if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
		key = 4;
	else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
		key = 4;

	for (i = 0; i < 16; i += 2) {
		/* Check all relevant fields of 1st dword */
		if ((pteg[i] & v_mask) == v_val) {
			/* If large page bit is set, check pgsize encoding */
			if (slbe->large &&
			    (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
				pgsize = decode_pagesize(slbe, pteg[i+1]);
				if (pgsize < 0)
					continue;
			}
			found = true;
			break;
		}
	}

	if (!found) {
		if (second)
			goto no_page_found;
		v_val |= HPTE_V_SECONDARY;
		second = true;
		goto do_second;
	}

	v = pteg[i];
	r = pteg[i+1];
	pp = (r & HPTE_R_PP) | key;

	gpte->eaddr = eaddr;
	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);

	eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
	gpte->page_size = pgsize;
	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
	gpte->may_read = false;
	gpte->may_write = false;

	switch (pp) {
	case 0:
	case 1:
	case 2:
	case 6:
		gpte->may_write = true;
		/* fall through */
	case 3:
	case 5:
	case 7:
		gpte->may_read = true;
		break;
	}

	dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
		"-> 0x%lx\n",
		eaddr, avpn, gpte->vpage, gpte->raddr);

	/* Update PTE R and C bits, so the guest's swapper knows we used the
	 * page */
	if (gpte->may_read) {
		/* Set the accessed flag */
		r |= HPTE_R_R;
	}
	if (data && gpte->may_write) {
		/* Set the dirty flag -- XXX even if not writing */
		r |= HPTE_R_C;
	}

	/* Write back into the PTEG */
	if (pteg[i+1] != r) {
		pteg[i+1] = r;
		copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
	}

	if (!gpte->may_read)
		return -EPERM;
	return 0;

no_page_found:
	return -ENOENT;

no_seg_found:
	dprintk("KVM MMU: Trigger segment fault\n");
	return -EINVAL;
}
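
/*
 * Emulate slbmte: decode the ESID and entry index from RB and the
 * VSID and flag bits from RS into vcpu->arch.slb[], work out the base
 * page size for large segments, and eagerly map the new segment on
 * the host side.
 */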
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	u64 esid, esid_1t;
	int slb_nr;
	struct kvmppc_slb *slbe;

	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

	vcpu_book3s = to_book3s(vcpu);

	esid = GET_ESID(rb);
	esid_1t = GET_ESID_1T(rb);
	slb_nr = rb & 0xfff;

	if (slb_nr >= vcpu->arch.slb_nr)
		return;

	slbe = &vcpu->arch.slb[slb_nr];

	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
	slbe->esid  = slbe->tb ? esid_1t : esid;
	slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
	slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
	slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
	slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

	slbe->base_page_size = MMU_PAGE_4K;
	if (slbe->large) {
		if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
			switch (rs & SLB_VSID_LP) {
			case SLB_VSID_LP_00:
				slbe->base_page_size = MMU_PAGE_16M;
				break;
			case SLB_VSID_LP_01:
				slbe->base_page_size = MMU_PAGE_64K;
				break;
			}
		} else
			slbe->base_page_size = MMU_PAGE_16M;
	}

	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
	slbe->origv = rs;

	/* Map the new segment */
	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}

static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr >= vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr >= vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->origv;
}
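
/*
 * Emulate slbie: invalidate the SLB entry covering ea and flush the
 * matching host shadow segment, sized 256MB or 1TB as appropriate.
 */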
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
	struct kvmppc_slb *slbe;
	u64 seg_size;

	dprintk("KVM MMU: slbie(0x%llx)\n", ea);

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

	if (!slbe)
		return;

	dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

	slbe->valid = false;
	slbe->orige = 0;
	slbe->origv = 0;

	seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
	kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}
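
/*
 * Emulate slbia: invalidate every SLB entry except entry 0 (which the
 * loop deliberately skips), then rebuild the mapping for the current
 * PC if instruction relocation is on.
 */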
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
	int i;

	dprintk("KVM MMU: slbia()\n");

	for (i = 1; i < vcpu->arch.slb_nr; i++) {
		vcpu->arch.slb[i].valid = false;
		vcpu->arch.slb[i].orige = 0;
		vcpu->arch.slb[i].origv = 0;
	}

	if (vcpu->arch.shared->msr & MSR_IR) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}
}
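
/*
 * Emulate mtsrin by synthesizing an equivalent slbmte, following the
 * segment-register-to-SLB mapping spelled out in the comment below.
 */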
static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
					ulong value)
{
	u64 rb = 0, rs = 0;

	/*
	 * According to Book3 2.01 mtsrin is implemented as:
	 *
	 * The SLB entry specified by (RB)32:35 is loaded from register
	 * RS, as follows.
	 *
	 * SLBE Bit	Source			SLB Field
	 *
	 * 0:31		0x0000_0000		ESID-0:31
	 * 32:35	(RB)32:35		ESID-32:35
	 * 36		0b1			V
	 * 37:61	0x00_0000||0b0		VSID-0:24
	 * 62:88	(RS)37:63		VSID-25:51
	 * 89:91	(RS)33:35		Ks Kp N
	 * 92		(RS)36			L ((RS)36 must be 0b0)
	 * 93		0b0			C
	 */

	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

	/* ESID = srnum */
	rb |= (srnum & 0xf) << 28;
	/* Set the valid bit */
	rb |= 1 << 27;
	/* Index = ESID */
	rb |= srnum;

	/* VSID = VSID */
	rs |= (value & 0xfffffff) << 12;
	/* flags = flags */
	rs |= ((value >> 28) & 0x7) << 9;

	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}

static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
				       bool large)
{
	u64 mask = 0xFFFFFFFFFULL;

	dprintk("KVM MMU: tlbie(0x%lx)\n", va);

	/*
	 * The tlbie instruction changed behaviour starting with
	 * POWER6.  POWER6 and later don't have the large page flag
	 * in the instruction but in the RB value, along with bits
	 * indicating page and segment sizes.
	 */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
		/* POWER6 or later */
		if (va & 1) {		/* L bit */
			if ((va & 0xf000) == 0x1000)
				mask = 0xFFFFFFFF0ULL;	/* 64k page */
			else
				mask = 0xFFFFFF000ULL;	/* 16M page */
		}
	} else {
		/* older processors, e.g. PPC970 */
		if (large)
			mask = 0xFFFFFF000ULL;
	}
	kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
}

#ifdef CONFIG_PPC_64K_PAGES
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
	ulong mp_ea = vcpu->arch.magic_page_ea;

	return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
		(mp_ea >> SID_SHIFT) == esid;
}
#endif
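
/*
 * Map a guest ESID to the VSID used for host shadow mappings.  With
 * translation off, synthetic VSID_REAL* ranges keep real-mode and
 * translated mappings apart; with translation on, the VSID comes from
 * the guest SLB.  The 64k flag is only set when both the host and the
 * segment support 64k pages, and never for the magic page's segment.
 */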
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
{
	ulong ea = esid << SID_SHIFT;
	struct kvmppc_slb *slb;
	u64 gvsid = esid;
	ulong mp_ea = vcpu->arch.magic_page_ea;
	int pagesize = MMU_PAGE_64K;

	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
		if (slb) {
			gvsid = slb->vsid;
			pagesize = slb->base_page_size;
			if (slb->tb) {
				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
				gvsid |= VSID_1T;
			}
		}
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		gvsid = VSID_REAL | esid;
		break;
	case MSR_IR:
		gvsid |= VSID_REAL_IR;
		break;
	case MSR_DR:
		gvsid |= VSID_REAL_DR;
		break;
	case MSR_DR|MSR_IR:
		if (!slb)
			goto no_slb;
		break;
	default:
		BUG();
		break;
	}

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Mark this as a 64k segment if the host is using
	 * 64k pages, the host MMU supports 64k pages and
	 * the guest segment page size is >= 64k,
	 * but not if this segment contains the magic page.
	 */
	if (pagesize >= MMU_PAGE_64K &&
	    mmu_psize_defs[MMU_PAGE_64K].shift &&
	    !segment_contains_magic_page(vcpu, esid))
		gvsid |= VSID_64K;
#endif

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	*vsid = gvsid;
	return 0;

no_slb:
	/* Catch magic page case */
	if (unlikely(mp_ea) &&
	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
	    !(vcpu->arch.shared->msr & MSR_PR)) {
		*vsid = VSID_REAL | esid;
		return 0;
	}

	return -EINVAL;
}
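
/*
 * Report whether the guest has enabled 32-byte dcbz via HID5; the
 * core emulation code uses this hook to decide whether dcbz needs
 * special handling.
 */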
static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
	return (to_book3s(vcpu)->hid[5] & 0x80);
}
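
/*
 * Wire up the Book3S-64 software MMU: mfsrin has no SLB equivalent
 * (hence NULL), the remaining ops point at the functions above, and
 * BOOK3S_HFLAG_SLB tells the core code to emulate an SLB rather than
 * segment registers.
 */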
void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	mmu->mfsrin = NULL;
	mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
	mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}