
/*
 * vtlb.c: guest virtual tlb handling module.
 * Copyright (c) 2004, Intel Corporation.
 *  Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *
 * Copyright (c) 2007, Intel Corporation.
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *  Xiantao Zhang <xiantao.zhang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include "vcpu.h"

#include <linux/rwsem.h>

#include <asm/tlb.h>

/*
 * Check to see if the address rid:va is translated by the TLB
 */
static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)
{
	return ((trp->p) && (trp->rid == rid)
				&& ((va - trp->vadr) < PSIZE(trp->ps)));
}

/*
 * Only for GUEST TR format.
 */
static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva)
{
	u64 sa1, ea1;

	if (!trp->p || trp->rid != rid)
		return 0;

	sa1 = trp->vadr;
	ea1 = sa1 + PSIZE(trp->ps) - 1;
	eva -= 1;
	if ((sva > ea1) || (sa1 > eva))
		return 0;
	else
		return 1;
}

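/*
 * Purge the machine TLB entry mapping va with page size 2^ps,
 * using a local purge (ptc.l).
 */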
void machine_tlb_purge(u64 va, u64 ps)
{
	ia64_ptcl(va, ps << 2);
}

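/*
 * Flush the entire local machine TLB by stepping through the ptc.e
 * address space with the counts and strides cached in the current
 * vcpu (presumably obtained from PAL_PTCE_INFO).
 */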
void local_flush_tlb_all(void)
{
	int i, j;
	unsigned long flags, count0, count1;
	unsigned long stride0, stride1, addr;

	addr	= current_vcpu->arch.ptce_base;
	count0	= current_vcpu->arch.ptce_count[0];
	count1	= current_vcpu->arch.ptce_count[1];
	stride0	= current_vcpu->arch.ptce_stride[0];
	stride1	= current_vcpu->arch.ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();		/* srlz.i implies srlz.d */
}

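/*
 * Decide whether the guest VHPT walker is enabled for this virtual
 * address and reference type, based on the guest rr.ve, pta.ve and
 * the relevant vpsr bits.
 */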
int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref)
{
	union ia64_rr vrr;
	union ia64_pta vpta;
	struct ia64_psr vpsr;

	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	vpta.val = vcpu_get_pta(vcpu);

	if (vrr.ve & vpta.ve) {
		switch (ref) {
		case DATA_REF:
		case NA_REF:
			return vpsr.dt;
		case INST_REF:
			return vpsr.dt && vpsr.it && vpsr.ic;
		case RSE_REF:
			return vpsr.dt && vpsr.rt;
		}
	}
	return 0;
}

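/*
 * Compute the long-format VHPT hash for rid:va using the guest PTA:
 * returns the address of the hash entry and the matching tag in *tag.
 */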
struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)
{
	u64 index, pfn, rid, pfn_bits;

	pfn_bits = vpta.size - 5 - 8;
	pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
	rid = _REGION_ID(vrr);
	index = ((rid & 0xff) << pfn_bits) | (pfn & ((1UL << pfn_bits) - 1));
	*tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);

	return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) +
				(index << 5));
}

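/*
 * Search the guest ITR/DTR arrays for a TR entry that translates va
 * under the current region id; returns NULL if none matches.
 */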
struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
{
	struct thash_data *trp;
	int i;
	u64 rid;

	rid = vcpu_get_rr(vcpu, va);
	rid = rid & RR_RID_MASK;
	if (type == D_TLB) {
		if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
			for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
					i < NDTRS; i++, trp++) {
				if (__is_tr_translated(trp, rid, va))
					return trp;
			}
		}
	} else {
		if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
			for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
					i < NITRS; i++, trp++) {
				if (__is_tr_translated(trp, rid, va))
					return trp;
			}
		}
	}

	return NULL;
}

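/*
 * Insert a machine pte into the machine VHPT entry hashed from ifa.
 * The entry tag is invalidated before the fields are updated, so a
 * hardware walker never sees a half-written entry; gpte is used to
 * record the guest physical address backing this mapping.
 */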
static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
{
	union ia64_rr rr;
	struct thash_data *head;
	unsigned long ps, gpaddr;

	ps = itir_ps(itir);
	rr.val = ia64_get_rr(ifa);

	gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
					(ifa & ((1UL << ps) - 1));

	head = (struct thash_data *)ia64_thash(ifa);
	head->etag = INVALID_TI_TAG;
	ia64_mf();
	head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
	head->itir = rr.ps << 2;
	head->etag = ia64_ttag(ifa);
	head->gpaddr = gpaddr;
}

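/*
 * Mark every page covered by the mapping described by pte (page size
 * 2^ps) as dirty in the dirty-log bitmap, under the VMM dirty-log lock.
 */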
void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
{
	u64 i, dirty_pages = 1;
	u64 base_gfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;

	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;

	vmm_spin_lock(lock);
	for (i = 0; i < dirty_pages; i++) {
		/* avoid RMW */
		if (!test_bit(base_gfn + i, dirty_bitmap))
			set_bit(base_gfn + i, dirty_bitmap);
	}
	vmm_spin_unlock(lock);
}

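/*
 * Install a guest mapping on the machine side: translate the guest pte
 * to a machine pte, then either insert it into the machine VHPT (when
 * the guest page is at least as large as the host mapping size) or
 * insert it directly into the machine TLB with itc, and finally mark
 * the backing pages dirty for non-I/O mappings.
 */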
void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
{
	u64 phy_pte, psr;
	union ia64_rr mrr;

	mrr.val = ia64_get_rr(va);
	phy_pte = translate_phy_pte(&pte, itir, va);

	if (itir_ps(itir) >= mrr.ps) {
		vhpt_insert(phy_pte, itir, va, pte);
	} else {
		phy_pte &= ~PAGE_FLAGS_RV_MASK;
		psr = ia64_clear_ic();
		ia64_itc(type, va, phy_pte, itir_ps(itir));
		paravirt_dv_serialize_data();
		ia64_set_psr(psr);
	}

	if (!(pte & VTLB_PTE_IO))
		mark_pages_dirty(v, pte, itir_ps(itir));
}

/*
 * vhpt lookup
 */
struct thash_data *vhpt_lookup(u64 va)
{
	struct thash_data *head;
	u64 tag;

	head = (struct thash_data *)ia64_thash(va);
	tag = ia64_ttag(va);
	if (head->etag == tag)
		return head;
	return NULL;
}

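/*
 * Walk the guest VHPT entry at iha and return it through *pte.
 * If a guest DTR covers iha, it is installed on the machine side first
 * so the access below can succeed; the entry is then read with a
 * speculative load (ld8.s) while psr.ic is off. Returns 1 if the read
 * would have faulted (NaT), 0 on success.
 */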
u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
	u64 ret;
	struct thash_data *data;

	data = __vtr_lookup(current_vcpu, iha, D_TLB);
	if (data != NULL)
		thash_vhpt_insert(current_vcpu, data->page_flags,
			data->itir, iha, D_TLB);

	asm volatile ("rsm psr.ic|psr.i;;"
			"srlz.d;;"
			"ld8.s r9=[%1];;"
			"tnat.nz p6,p7=r9;;"
			"(p6) mov %0=1;"
			"(p6) mov r9=r0;"
			"(p7) extr.u r9=r9,0,53;;"
			"(p7) mov %0=r0;"
			"(p7) st8 [%2]=r9;;"
			"ssm psr.ic;;"
			"srlz.d;;"
			/* "ssm psr.i;;" Once interrupts in vmm open, need fix */
			: "=r"(ret) : "r"(iha), "r"(pte) : "memory");

	return ret;
}

/*
 * purge software guest tlb
 */
static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
	struct thash_data *cur;
	u64 start, curadr, size, psbits, tag, rr_ps, num;
	union ia64_rr vrr;
	struct thash_cb *hcb = &v->arch.vtlb;

	vrr.val = vcpu_get_rr(v, va);
	psbits = VMX(v, psbits[(va >> 61)]);
	start = va & ~((1UL << ps) - 1);
	while (psbits) {
		curadr = start;
		rr_ps = __ffs(psbits);
		psbits &= ~(1UL << rr_ps);
		num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
		size = PSIZE(rr_ps);
		vrr.ps = rr_ps;
		while (num) {
			cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
			if (cur->etag == tag && cur->ps == rr_ps)
				cur->etag = INVALID_TI_TAG;
			curadr += size;
			num--;
		}
	}
}

/*
 * purge VHPT and machine TLB
 */
static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
	struct thash_data *cur;
	u64 start, size, tag, num;
	union ia64_rr rr;

	start = va & ~((1UL << ps) - 1);
	rr.val = ia64_get_rr(va);
	size = PSIZE(rr.ps);
	num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
	while (num) {
		cur = (struct thash_data *)ia64_thash(start);
		tag = ia64_ttag(start);
		if (cur->etag == tag)
			cur->etag = INVALID_TI_TAG;
		start += size;
		num--;
	}
	machine_tlb_purge(va, ps);
}

/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 *  1: When inserting a VHPT entry into the thash, "va" must be an
 *     address covered by the inserted machine VHPT entry.
 *  2: The entry is always in TLB format.
 *  3: The caller must make sure the new entry will not overlap
 *     with any existing entry.
 */
void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va)
{
	struct thash_data *head;
	union ia64_rr vrr;
	u64 tag;
	struct thash_cb *hcb = &v->arch.vtlb;

	vrr.val = vcpu_get_rr(v, va);
	vrr.ps = itir_ps(itir);
	VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
	head = vsa_thash(hcb->pta, va, vrr.val, &tag);
	head->page_flags = pte;
	head->itir = itir;
	head->etag = tag;
}

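/*
 * Check whether [va, va + 2^ps) overlaps any guest ITR/DTR entry with
 * the same region id; returns the TR slot index, or -1 if none.
 */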
int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type)
{
	struct thash_data *trp;
	int i;
	u64 end, rid;

	rid = vcpu_get_rr(vcpu, va);
	rid = rid & RR_RID_MASK;
	end = va + PSIZE(ps);
	if (type == D_TLB) {
		if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
			for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
					i < NDTRS; i++, trp++) {
				if (__is_tr_overlap(trp, rid, va, end))
					return i;
			}
		}
	} else {
		if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
			for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
					i < NITRS; i++, trp++) {
				if (__is_tr_overlap(trp, rid, va, end))
					return i;
			}
		}
	}
	return -1;
}

/*
 * Purge entries in VTLB and VHPT
 */
void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
{
	if (vcpu_quick_region_check(v->arch.tc_regions, va))
		vtlb_purge(v, va, ps);
	vhpt_purge(v, va, ps);
}

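/*
 * Variant of thash_purge_entries() that performs the purges on the
 * region offset of va while the region check still uses the original
 * address; the _remote suffix suggests it serves purge requests issued
 * on behalf of another vcpu.
 */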
void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
{
	u64 old_va = va;

	va = REGION_OFFSET(va);
	if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
		vtlb_purge(v, va, ps);
	vhpt_purge(v, va, ps);
}

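/*
 * Translate a guest pte into a machine pte via the p2m table.
 * For an I/O page (other than physical MMIO) the VTLB_PTE_IO flag is
 * set in *pte and -1 is returned; otherwise the machine pte value is
 * returned.
 */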
u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
{
	u64 ps, ps_mask, paddr, maddr, io_mask;
	union pte_flags phy_pte;

	ps = itir_ps(itir);
	ps_mask = ~((1UL << ps) - 1);
	phy_pte.val = *pte;
	paddr = *pte;
	paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
	maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
	io_mask = maddr & GPFN_IO_MASK;
	if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
		*pte |= VTLB_PTE_IO;
		return -1;
	}
	maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
					(paddr & ~PAGE_MASK);
	phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
	return phy_pte.val;
}

/*
 * Purge overlapping TCs and then insert the new entry, to emulate itc ops.
 * Note: only TC entries can be purged and inserted.
 */
void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
						u64 ifa, int type)
{
	u64 ps;
	u64 phy_pte, io_mask, index;
	union ia64_rr vrr, mrr;

	ps = itir_ps(itir);
	vrr.val = vcpu_get_rr(v, ifa);
	mrr.val = ia64_get_rr(ifa);

	index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
	phy_pte = translate_phy_pte(&pte, itir, ifa);

	/* Ensure WB attribute if pte is related to a normal mem page,
	 * which is required by vga acceleration since qemu maps shared
	 * vram buffer with WB.
	 */
	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
			io_mask != GPFN_PHYS_MMIO) {
		pte &= ~_PAGE_MA_MASK;
		phy_pte &= ~_PAGE_MA_MASK;
	}

	vtlb_purge(v, ifa, ps);
	vhpt_purge(v, ifa, ps);

	if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
		vtlb_insert(v, pte, itir, ifa);
		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
	}
	if (pte & VTLB_PTE_IO)
		return;

	if (ps >= mrr.ps)
		vhpt_insert(phy_pte, itir, ifa, pte);
	else {
		u64 psr;

		phy_pte &= ~PAGE_FLAGS_RV_MASK;
		psr = ia64_clear_ic();
		ia64_itc(type, ifa, phy_pte, ps);
		paravirt_dv_serialize_data();
		ia64_set_psr(psr);
	}
	if (!(pte & VTLB_PTE_IO))
		mark_pages_dirty(v, pte, ps);
}

/*
 * Purge all TC and VHPT entries, including those in the hash tables.
 */
void thash_purge_all(struct kvm_vcpu *v)
{
	int i;
	struct thash_data *head;
	struct thash_cb *vtlb, *vhpt;

	vtlb = &v->arch.vtlb;
	vhpt = &v->arch.vhpt;

	for (i = 0; i < 8; i++)
		VMX(v, psbits[i]) = 0;

	head = vtlb->hash;
	for (i = 0; i < vtlb->num; i++) {
		head->page_flags = 0;
		head->etag = INVALID_TI_TAG;
		head->itir = 0;
		head->next = 0;
		head++;
	}

	head = vhpt->hash;
	for (i = 0; i < vhpt->num; i++) {
		head->page_flags = 0;
		head->etag = INVALID_TI_TAG;
		head->itir = 0;
		head->next = 0;
		head++;
	}

	local_flush_tlb_all();
}

/*
 * Look up the hash table and its collision chain to find an entry
 * covering this address rid:va.
 *
 * INPUT:
 *  in: TLB format for both VHPT & TLB.
 */
struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
{
	struct thash_data *cch;
	u64 psbits, ps, tag;
	union ia64_rr vrr;
	struct thash_cb *hcb = &v->arch.vtlb;

	cch = __vtr_lookup(v, va, is_data);
	if (cch)
		return cch;

	if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
		return NULL;

	psbits = VMX(v, psbits[(va >> 61)]);
	vrr.val = vcpu_get_rr(v, va);
	while (psbits) {
		ps = __ffs(psbits);
		psbits &= ~(1UL << ps);
		vrr.ps = ps;
		cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
		if (cch->etag == tag && cch->ps == ps)
			return cch;
	}

	return NULL;
}

/*
 * Initialize internal control data before service.
 */
void thash_init(struct thash_cb *hcb, u64 sz)
{
	int i;
	struct thash_data *head;

	hcb->pta.val = (unsigned long)hcb->hash;
	hcb->pta.vf = 1;
	hcb->pta.ve = 1;
	hcb->pta.size = sz;
	head = hcb->hash;
	for (i = 0; i < hcb->num; i++) {
		head->page_flags = 0;
		head->itir = 0;
		head->etag = INVALID_TI_TAG;
		head->next = 0;
		head++;
	}
}

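/*
 * p2m accessors: kvm_get_mpt_entry() returns the raw p2m entry for a
 * guest pfn (panicking the VM on an out-of-range gpfn), kvm_lookup_mpa()
 * reduces it to the machine page number bits, and kvm_gpa_to_mpa()
 * converts a full guest physical address to a machine physical address.
 */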
u64 kvm_get_mpt_entry(u64 gpfn)
{
	u64 *base = (u64 *) KVM_P2M_BASE;

	if (gpfn >= (KVM_P2M_SIZE >> 3))
		panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);

	return *(base + gpfn);
}

u64 kvm_lookup_mpa(u64 gpfn)
{
	u64 maddr;

	maddr = kvm_get_mpt_entry(gpfn);
	return maddr & _PAGE_PPN_MASK;
}

u64 kvm_gpa_to_mpa(u64 gpa)
{
	u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);

	return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
}

/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return fetched bundle.
 */
int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
	u64 gpip = 0;	/* guest physical IP */
	u64 *vpa;
	struct thash_data *tlb;
	u64 maddr;

	if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
		/* I-side physical mode */
		gpip = gip;
	} else {
		tlb = vtlb_lookup(vcpu, gip, I_TLB);
		if (tlb)
			gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
				(gip & (PSIZE(tlb->ps) - 1));
	}
	if (gpip) {
		maddr = kvm_gpa_to_mpa(gpip);
	} else {
		tlb = vhpt_lookup(gip);
		if (tlb == NULL) {
			ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
			return IA64_FAULT;
		}
		maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
			| (gip & (PSIZE(tlb->ps) - 1));
	}
	vpa = (u64 *)__kvm_va(maddr);

	pbundle->i64[0] = *vpa++;
	pbundle->i64[1] = *vpa;

	return IA64_NO_FAULT;
}

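/*
 * Per-vcpu initialization of the machine VHPT and the software VTLB
 * hash tables; kvm_init_vhpt() also points the hardware PTA at the
 * newly initialized VHPT.
 */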
void kvm_init_vhpt(struct kvm_vcpu *v)
{
	v->arch.vhpt.num = VHPT_NUM_ENTRIES;
	thash_init(&v->arch.vhpt, VHPT_SHIFT);
	ia64_set_pta(v->arch.vhpt.pta.val);
	/* Enable VHPT here? */
}

void kvm_init_vtlb(struct kvm_vcpu *v)
{
	v->arch.vtlb.num = VTLB_NUM_ENTRIES;
	thash_init(&v->arch.vtlb, VTLB_SHIFT);
}