/*
 * vtlb.c: guest virtual tlb handling module.
 * Copyright (c) 2004, Intel Corporation.
 *  Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *
 * Copyright (c) 2007, Intel Corporation.
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *  Xiantao Zhang <xiantao.zhang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include "vcpu.h"

#include <linux/rwsem.h>

#include <asm/tlb.h>

/*
 * Check to see if the address rid:va is translated by the TLB
 */
static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)
{
        return ((trp->p) && (trp->rid == rid)
                        && ((va - trp->vadr) < PSIZE(trp->ps)));
}

/*
 * Only for GUEST TR format.
 */
static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva)
{
        u64 sa1, ea1;

        if (!trp->p || trp->rid != rid)
                return 0;

        sa1 = trp->vadr;
        ea1 = sa1 + PSIZE(trp->ps) - 1;
        eva -= 1;
        if ((sva > ea1) || (sa1 > eva))
                return 0;
        else
                return 1;
}

void machine_tlb_purge(u64 va, u64 ps)
{
        ia64_ptcl(va, ps << 2);
}

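/*
 * Flush the whole local TLB with a ptc.e loop, using the purge base,
 * counts and strides cached in the current vcpu's arch state.
 */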
void local_flush_tlb_all(void)
{
        int i, j;
        unsigned long flags, count0, count1;
        unsigned long stride0, stride1, addr;

        addr = current_vcpu->arch.ptce_base;
        count0 = current_vcpu->arch.ptce_count[0];
        count1 = current_vcpu->arch.ptce_count[1];
        stride0 = current_vcpu->arch.ptce_stride[0];
        stride1 = current_vcpu->arch.ptce_stride[1];

        local_irq_save(flags);
        for (i = 0; i < count0; ++i) {
                for (j = 0; j < count1; ++j) {
                        ia64_ptce(addr);
                        addr += stride1;
                }
                addr += stride0;
        }
        local_irq_restore(flags);
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}

int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref)
{
        union ia64_rr vrr;
        union ia64_pta vpta;
        struct ia64_psr vpsr;

        vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
        vrr.val = vcpu_get_rr(vcpu, vadr);
        vpta.val = vcpu_get_pta(vcpu);

        if (vrr.ve & vpta.ve) {
                switch (ref) {
                case DATA_REF:
                case NA_REF:
                        return vpsr.dt;
                case INST_REF:
                        return vpsr.dt && vpsr.it && vpsr.ic;
                case RSE_REF:
                        return vpsr.dt && vpsr.rt;
                }
        }
        return 0;
}

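/*
 * Hash rid:va into the guest VHPT: build the bucket index from the low
 * byte of the region id and the virtual page number, and return the
 * bucket address together with the tag used to validate a hit.
 */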
struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)
{
        u64 index, pfn, rid, pfn_bits;

        pfn_bits = vpta.size - 5 - 8;
        pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
        rid = _REGION_ID(vrr);
        index = ((rid & 0xff) << pfn_bits) | (pfn & ((1UL << pfn_bits) - 1));
        *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);

        return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) +
                        (index << 5));
}

struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
{
        struct thash_data *trp;
        int i;
        u64 rid;

        rid = vcpu_get_rr(vcpu, va);
        rid = rid & RR_RID_MASK;
        if (type == D_TLB) {
                if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
                                        i < NDTRS; i++, trp++) {
                                if (__is_tr_translated(trp, rid, va))
                                        return trp;
                        }
                }
        } else {
                if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
                                        i < NITRS; i++, trp++) {
                                if (__is_tr_translated(trp, rid, va))
                                        return trp;
                        }
                }
        }
        return NULL;
}

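/*
 * Fill one machine VHPT entry for ifa: invalidate the tag first, issue a
 * memory fence, then write page_flags/itir/etag and record the guest
 * physical address covered by this mapping.
 */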
static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
{
        union ia64_rr rr;
        struct thash_data *head;
        unsigned long ps, gpaddr;

        ps = itir_ps(itir);
        gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
                        (ifa & ((1UL << ps) - 1));

        rr.val = ia64_get_rr(ifa);
        head = (struct thash_data *)ia64_thash(ifa);
        head->etag = INVALID_TI_TAG;
        ia64_mf();
        head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
        head->itir = rr.ps << 2;
        head->etag = ia64_ttag(ifa);
        head->gpaddr = gpaddr;
}

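/*
 * Mark every PAGE_SIZE page covered by this pte as dirty in the shared
 * dirty-log bitmap; a mapping larger than PAGE_SIZE dirties multiple
 * bitmap bits.
 */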
void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
{
        u64 i, dirty_pages = 1;
        u64 base_gfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
        spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
        void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;

        dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;

        vmm_spin_lock(lock);
        for (i = 0; i < dirty_pages; i++) {
                /* avoid RMW */
                if (!test_bit(base_gfn + i, dirty_bitmap))
                        set_bit(base_gfn + i, dirty_bitmap);
        }
        vmm_spin_unlock(lock);
}

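/*
 * Insert a guest translation on the machine side: use the machine VHPT
 * when the guest page is at least as large as the host mapping for va,
 * otherwise insert it directly into the TLB with itc while PSR.ic is
 * cleared.
 */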
void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
{
        u64 phy_pte, psr;
        union ia64_rr mrr;

        mrr.val = ia64_get_rr(va);
        phy_pte = translate_phy_pte(&pte, itir, va);

        if (itir_ps(itir) >= mrr.ps) {
                vhpt_insert(phy_pte, itir, va, pte);
        } else {
                phy_pte &= ~PAGE_FLAGS_RV_MASK;
                psr = ia64_clear_ic();
                ia64_itc(type, va, phy_pte, itir_ps(itir));
                ia64_set_psr(psr);
        }

        if (!(pte & VTLB_PTE_IO))
                mark_pages_dirty(v, pte, itir_ps(itir));
}

/*
 * vhpt lookup
 */
struct thash_data *vhpt_lookup(u64 va)
{
        struct thash_data *head;
        u64 tag;

        head = (struct thash_data *)ia64_thash(va);
        tag = ia64_ttag(va);
        if (head->etag == tag)
                return head;
        return NULL;
}

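/*
 * Read the guest VHPT entry at iha with a speculative load while PSR.ic
 * and PSR.i are off.  If a TR mapping covers iha, it is propagated into
 * the machine VHPT first.  Returns 0 and stores the pte on success, or 1
 * if the speculative load produced a NaT.
 */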
u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
        u64 ret;
        struct thash_data *data;

        data = __vtr_lookup(current_vcpu, iha, D_TLB);
        if (data != NULL)
                thash_vhpt_insert(current_vcpu, data->page_flags,
                        data->itir, iha, D_TLB);

        asm volatile ("rsm psr.ic|psr.i;;"
                        "srlz.d;;"
                        "ld8.s r9=[%1];;"
                        "tnat.nz p6,p7=r9;;"
                        "(p6) mov %0=1;"
                        "(p6) mov r9=r0;"
                        "(p7) extr.u r9=r9,0,53;;"
                        "(p7) mov %0=r0;"
                        "(p7) st8 [%2]=r9;;"
                        "ssm psr.ic;;"
                        "srlz.d;;"
                        /* "ssm psr.i;;" to be restored once the vmm runs
                         * with interrupts enabled */
                        : "=r"(ret) : "r"(iha), "r"(pte) : "memory");

        return ret;
}

/*
 * purge software guest tlb
 */
static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
        struct thash_data *cur;
        u64 start, curadr, size, psbits, tag, rr_ps, num;
        union ia64_rr vrr;
        struct thash_cb *hcb = &v->arch.vtlb;

        vrr.val = vcpu_get_rr(v, va);
        psbits = VMX(v, psbits[(va >> 61)]);
        start = va & ~((1UL << ps) - 1);
        while (psbits) {
                curadr = start;
                rr_ps = __ffs(psbits);
                psbits &= ~(1UL << rr_ps);
                num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
                size = PSIZE(rr_ps);
                vrr.ps = rr_ps;
                while (num) {
                        cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
                        if (cur->etag == tag && cur->ps == rr_ps)
                                cur->etag = INVALID_TI_TAG;
                        curadr += size;
                        num--;
                }
        }
}

/*
 * purge VHPT and machine TLB
 */
static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
        struct thash_data *cur;
        u64 start, size, tag, num;
        union ia64_rr rr;

        start = va & ~((1UL << ps) - 1);
        rr.val = ia64_get_rr(va);
        size = PSIZE(rr.ps);
        num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
        while (num) {
                cur = (struct thash_data *)ia64_thash(start);
                tag = ia64_ttag(start);
                if (cur->etag == tag)
                        cur->etag = INVALID_TI_TAG;
                start += size;
                num--;
        }
        machine_tlb_purge(va, ps);
}

/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 *  1: When inserting a VHPT entry into the thash, "va" must be an address
 *     covered by the inserted machine VHPT entry.
 *  2: The entry is always in TLB format.
 *  3: The caller needs to make sure the new entry will not overlap
 *     with any existing entry.
 */
void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va)
{
        struct thash_data *head;
        union ia64_rr vrr;
        u64 tag;
        struct thash_cb *hcb = &v->arch.vtlb;

        vrr.val = vcpu_get_rr(v, va);
        vrr.ps = itir_ps(itir);
        VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
        head = vsa_thash(hcb->pta, va, vrr.val, &tag);
        head->page_flags = pte;
        head->itir = itir;
        head->etag = tag;
}

int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type)
{
        struct thash_data *trp;
        int i;
        u64 end, rid;

        rid = vcpu_get_rr(vcpu, va);
        rid = rid & RR_RID_MASK;
        end = va + PSIZE(ps);
        if (type == D_TLB) {
                if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
                                        i < NDTRS; i++, trp++) {
                                if (__is_tr_overlap(trp, rid, va, end))
                                        return i;
                        }
                }
        } else {
                if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
                                        i < NITRS; i++, trp++) {
                                if (__is_tr_overlap(trp, rid, va, end))
                                        return i;
                        }
                }
        }
        return -1;
}

/*
 * Purge entries in VTLB and VHPT
 */
void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
{
        if (vcpu_quick_region_check(v->arch.tc_regions, va))
                vtlb_purge(v, va, ps);
        vhpt_purge(v, va, ps);
}

void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
{
        u64 old_va = va;

        va = REGION_OFFSET(va);
        if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
                vtlb_purge(v, va, ps);
        vhpt_purge(v, va, ps);
}

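/*
 * Translate a guest pte into a machine pte by looking up the guest
 * physical frame in the P2M table.  For an I/O page (other than
 * GPFN_PHYS_MMIO) the pte is tagged with VTLB_PTE_IO and -1 is returned;
 * otherwise the machine ppn is substituted into the returned pte.
 */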
u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
{
        u64 ps, ps_mask, paddr, maddr, io_mask;
        union pte_flags phy_pte;

        ps = itir_ps(itir);
        ps_mask = ~((1UL << ps) - 1);
        phy_pte.val = *pte;
        paddr = *pte;
        paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
        maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
        io_mask = maddr & GPFN_IO_MASK;
        if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
                *pte |= VTLB_PTE_IO;
                return -1;
        }
        maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
                        (paddr & ~PAGE_MASK);
        phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
        return phy_pte.val;
}

/*
 * Purge overlapping TCs and then insert the new entry, to emulate itc ops.
 * Notes: only TC entries can be purged and inserted this way.
 * A return value of 1 indicates the mapping is MMIO.
 */
int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
                                                u64 ifa, int type)
{
        u64 ps;
        u64 phy_pte, io_mask, index;
        union ia64_rr vrr, mrr;
        int ret = 0;

        ps = itir_ps(itir);
        vrr.val = vcpu_get_rr(v, ifa);
        mrr.val = ia64_get_rr(ifa);

        index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
        io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
        phy_pte = translate_phy_pte(&pte, itir, ifa);

        /* Ensure the WB attribute if the pte refers to a normal memory page,
         * which is required for vga acceleration since qemu maps the shared
         * vram buffer with WB.
         */
        if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
                        io_mask != GPFN_PHYS_MMIO) {
                pte &= ~_PAGE_MA_MASK;
                phy_pte &= ~_PAGE_MA_MASK;
        }

        if (pte & VTLB_PTE_IO)
                ret = 1;

        vtlb_purge(v, ifa, ps);
        vhpt_purge(v, ifa, ps);

        if (ps == mrr.ps) {
                if (!(pte & VTLB_PTE_IO)) {
                        vhpt_insert(phy_pte, itir, ifa, pte);
                } else {
                        vtlb_insert(v, pte, itir, ifa);
                        vcpu_quick_region_set(VMX(v, tc_regions), ifa);
                }
        } else if (ps > mrr.ps) {
                vtlb_insert(v, pte, itir, ifa);
                vcpu_quick_region_set(VMX(v, tc_regions), ifa);
                if (!(pte & VTLB_PTE_IO))
                        vhpt_insert(phy_pte, itir, ifa, pte);
        } else {
                u64 psr;

                phy_pte &= ~PAGE_FLAGS_RV_MASK;
                psr = ia64_clear_ic();
                ia64_itc(type, ifa, phy_pte, ps);
                ia64_set_psr(psr);
        }
        if (!(pte & VTLB_PTE_IO))
                mark_pages_dirty(v, pte, ps);

        return ret;
}

/*
 * Purge all TC and VHPT entries, including those in the hash table.
 */
void thash_purge_all(struct kvm_vcpu *v)
{
        int i;
        struct thash_data *head;
        struct thash_cb *vtlb, *vhpt;

        vtlb = &v->arch.vtlb;
        vhpt = &v->arch.vhpt;

        for (i = 0; i < 8; i++)
                VMX(v, psbits[i]) = 0;

        head = vtlb->hash;
        for (i = 0; i < vtlb->num; i++) {
                head->page_flags = 0;
                head->etag = INVALID_TI_TAG;
                head->itir = 0;
                head->next = 0;
                head++;
        }

        head = vhpt->hash;
        for (i = 0; i < vhpt->num; i++) {
                head->page_flags = 0;
                head->etag = INVALID_TI_TAG;
                head->itir = 0;
                head->next = 0;
                head++;
        }

        local_flush_tlb_all();
}

/*
 * Lookup the hash table and its collision chain to find an entry
 * covering this address rid:va.
 *
 * INPUT:
 *  in: TLB format for both VHPT & TLB.
 */
struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
{
        struct thash_data *cch;
        u64 psbits, ps, tag;
        union ia64_rr vrr;
        struct thash_cb *hcb = &v->arch.vtlb;

        cch = __vtr_lookup(v, va, is_data);
        if (cch)
                return cch;

        if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
                return NULL;

        psbits = VMX(v, psbits[(va >> 61)]);
        vrr.val = vcpu_get_rr(v, va);
        while (psbits) {
                ps = __ffs(psbits);
                psbits &= ~(1UL << ps);
                vrr.ps = ps;
                cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
                if (cch->etag == tag && cch->ps == ps)
                        return cch;
        }

        return NULL;
}

/*
 * Initialize internal control data before service.
 */
void thash_init(struct thash_cb *hcb, u64 sz)
{
        int i;
        struct thash_data *head;

        hcb->pta.val = (unsigned long)hcb->hash;
        hcb->pta.vf = 1;
        hcb->pta.ve = 1;
        hcb->pta.size = sz;

        head = hcb->hash;
        for (i = 0; i < hcb->num; i++) {
                head->page_flags = 0;
                head->itir = 0;
                head->etag = INVALID_TI_TAG;
                head->next = 0;
                head++;
        }
}

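/*
 * The P2M (guest pfn to machine) table is a flat u64 array mapped at
 * KVM_P2M_BASE and indexed by guest page frame number.
 */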
u64 kvm_get_mpt_entry(u64 gpfn)
{
        u64 *base = (u64 *) KVM_P2M_BASE;

        return *(base + gpfn);
}

u64 kvm_lookup_mpa(u64 gpfn)
{
        u64 maddr;

        maddr = kvm_get_mpt_entry(gpfn);
        return maddr & _PAGE_PPN_MASK;
}

u64 kvm_gpa_to_mpa(u64 gpa)
{
        u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);

        return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
}

/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return fetched bundle.
 */
int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
        u64 gpip = 0;   /* guest physical IP */
        u64 *vpa;
        struct thash_data *tlb;
        u64 maddr;

        if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
                /* I-side physical mode */
                gpip = gip;
        } else {
                tlb = vtlb_lookup(vcpu, gip, I_TLB);
                if (tlb)
                        gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
                                (gip & (PSIZE(tlb->ps) - 1));
        }
        if (gpip) {
                maddr = kvm_gpa_to_mpa(gpip);
        } else {
                tlb = vhpt_lookup(gip);
                if (tlb == NULL) {
                        ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
                        return IA64_FAULT;
                }
                maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
                        | (gip & (PSIZE(tlb->ps) - 1));
        }
        vpa = (u64 *)__kvm_va(maddr);

        pbundle->i64[0] = *vpa++;
        pbundle->i64[1] = *vpa;

        return IA64_NO_FAULT;
}

void kvm_init_vhpt(struct kvm_vcpu *v)
{
        v->arch.vhpt.num = VHPT_NUM_ENTRIES;
        thash_init(&v->arch.vhpt, VHPT_SHIFT);
        ia64_set_pta(v->arch.vhpt.pta.val);
        /* Enable VHPT here? */
}

void kvm_init_vtlb(struct kvm_vcpu *v)
{
        v->arch.vtlb.num = VTLB_NUM_ENTRIES;
        thash_init(&v->arch.vtlb, VTLB_SHIFT);
}