/*
 * vtlb.c: guest virtual tlb handling module.
 * Copyright (c) 2004, Intel Corporation.
 *  Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *
 * Copyright (c) 2007, Intel Corporation.
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *  Xiantao Zhang <xiantao.zhang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include "vcpu.h"

#include <linux/rwsem.h>

#include <asm/tlb.h>
/*
 * Check to see if the address rid:va is translated by the TLB
 */
static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)
{
        return ((trp->p) && (trp->rid == rid)
                        && ((va - trp->vadr) < PSIZE(trp->ps)));
}

/*
 * Only for GUEST TR format.
 */
static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva)
{
        u64 sa1, ea1;

        if (!trp->p || trp->rid != rid)
                return 0;

        sa1 = trp->vadr;
        ea1 = sa1 + PSIZE(trp->ps) - 1;
        eva -= 1;
        if ((sva > ea1) || (sa1 > eva))
                return 0;
        else
                return 1;
}
void machine_tlb_purge(u64 va, u64 ps)
{
        ia64_ptcl(va, ps << 2);
}

void local_flush_tlb_all(void)
{
        int i, j;
        unsigned long flags, count0, count1;
        unsigned long stride0, stride1, addr;

        addr    = current_vcpu->arch.ptce_base;
        count0  = current_vcpu->arch.ptce_count[0];
        count1  = current_vcpu->arch.ptce_count[1];
        stride0 = current_vcpu->arch.ptce_stride[0];
        stride1 = current_vcpu->arch.ptce_stride[1];

        local_irq_save(flags);
        for (i = 0; i < count0; ++i) {
                for (j = 0; j < count1; ++j) {
                        ia64_ptce(addr);
                        addr += stride1;
                }
                addr += stride0;
        }
        local_irq_restore(flags);
        ia64_srlz_i();          /* srlz.i implies srlz.d */
}
int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref)
{
        union ia64_rr   vrr;
        union ia64_pta  vpta;
        struct ia64_psr vpsr;

        vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
        vrr.val = vcpu_get_rr(vcpu, vadr);
        vpta.val = vcpu_get_pta(vcpu);

        if (vrr.ve & vpta.ve) {
                switch (ref) {
                case DATA_REF:
                case NA_REF:
                        return vpsr.dt;
                case INST_REF:
                        return vpsr.dt && vpsr.it && vpsr.ic;
                case RSE_REF:
                        return vpsr.dt && vpsr.rt;
                }
        }
        return 0;
}
struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)
{
        u64 index, pfn, rid, pfn_bits;

        pfn_bits = vpta.size - 5 - 8;
        pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
        rid = _REGION_ID(vrr);
        index = ((rid & 0xff) << pfn_bits) | (pfn & ((1UL << pfn_bits) - 1));
        *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);

        return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) +
                                (index << 5));
}
struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
{
        struct thash_data *trp;
        int i;
        u64 rid;

        rid = vcpu_get_rr(vcpu, va);
        rid = rid & RR_RID_MASK;
        if (type == D_TLB) {
                if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
                                        i < NDTRS; i++, trp++) {
                                if (__is_tr_translated(trp, rid, va))
                                        return trp;
                        }
                }
        } else {
                if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
                                        i < NITRS; i++, trp++) {
                                if (__is_tr_translated(trp, rid, va))
                                        return trp;
                        }
                }
        }

        return NULL;
}
static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
{
        union ia64_rr rr;
        struct thash_data *head;
        unsigned long ps, gpaddr;

        ps = itir_ps(itir);
        rr.val = ia64_get_rr(ifa);

        gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
                                (ifa & ((1UL << ps) - 1));

        head = (struct thash_data *)ia64_thash(ifa);
        head->etag = INVALID_TI_TAG;
        ia64_mf();
        head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
        head->itir = rr.ps << 2;
        head->etag = ia64_ttag(ifa);
        head->gpaddr = gpaddr;
}
void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
{
        u64 i, dirty_pages = 1;
        u64 base_gfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
        spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
        void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;

        dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;

        vmm_spin_lock(lock);
        for (i = 0; i < dirty_pages; i++) {
                /* avoid RMW */
                if (!test_bit(base_gfn + i, dirty_bitmap))
                        set_bit(base_gfn + i, dirty_bitmap);
        }
        vmm_spin_unlock(lock);
}
void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
{
        u64 phy_pte, psr;
        union ia64_rr mrr;

        mrr.val = ia64_get_rr(va);
        phy_pte = translate_phy_pte(&pte, itir, va);

        if (itir_ps(itir) >= mrr.ps) {
                vhpt_insert(phy_pte, itir, va, pte);
        } else {
                phy_pte &= ~PAGE_FLAGS_RV_MASK;
                psr = ia64_clear_ic();
                ia64_itc(type, va, phy_pte, itir_ps(itir));
                paravirt_dv_serialize_data();
                ia64_set_psr(psr);
        }

        if (!(pte & VTLB_PTE_IO))
                mark_pages_dirty(v, pte, itir_ps(itir));
}
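
/*
 * Usage sketch, not part of the build: a hypothetical caller installing a
 * 1:1 data mapping for a guest running in physical mode.  The pte value
 * (ppn only, flags omitted) and the itir encoding are assumptions for
 * illustration; real callers pass the guest-supplied values.
 */
#if 0
static void example_phys_mode_map(struct kvm_vcpu *v, u64 gpa)
{
        u64 pte = gpa & _PAGE_PPN_MASK;         /* identity ppn, no flags */
        u64 itir = PAGE_SHIFT << 2;             /* ps field of the itir */

        /* Pages smaller than the machine page size are pinned with itc;
         * others get a machine VHPT entry instead. */
        thash_vhpt_insert(v, pte, itir, gpa, D_TLB);
}
#endif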
/*
 * vhpt lookup
 */
struct thash_data *vhpt_lookup(u64 va)
{
        struct thash_data *head;
        u64 tag;

        head = (struct thash_data *)ia64_thash(va);
        tag = ia64_ttag(va);
        if (head->etag == tag)
                return head;
        return NULL;
}

u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
        u64 ret;
        struct thash_data *data;

        data = __vtr_lookup(current_vcpu, iha, D_TLB);
        if (data != NULL)
                thash_vhpt_insert(current_vcpu, data->page_flags,
                        data->itir, iha, D_TLB);

        /* Speculatively read the guest VHPT entry at iha with interrupts
         * and interrupt collection disabled: on success *pte is filled and
         * 0 is returned; a NaT result (entry not readable) returns 1
         * instead of taking a fault here. */
        asm volatile ("rsm psr.ic|psr.i;;"
                        "srlz.d;;"
                        "ld8.s r9=[%1];;"
                        "tnat.nz p6,p7=r9;;"
                        "(p6) mov %0=1;"
                        "(p6) mov r9=r0;"
                        "(p7) extr.u r9=r9,0,53;;"
                        "(p7) mov %0=r0;"
                        "(p7) st8 [%2]=r9;;"
                        "ssm psr.ic;;"
                        "srlz.d;;"
                        "ssm psr.i;;"
                        "srlz.d;;"
                        : "=r"(ret) : "r"(iha), "r"(pte) : "memory");

        return ret;
}
/*
 * purge software guest tlb
 */
static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
        struct thash_data *cur;
        u64 start, curadr, size, psbits, tag, rr_ps, num;
        union ia64_rr vrr;
        struct thash_cb *hcb = &v->arch.vtlb;

        vrr.val = vcpu_get_rr(v, va);
        psbits = VMX(v, psbits[(va >> 61)]);
        start = va & ~((1UL << ps) - 1);
        while (psbits) {
                curadr = start;
                rr_ps = __ffs(psbits);
                psbits &= ~(1UL << rr_ps);
                num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
                size = PSIZE(rr_ps);
                vrr.ps = rr_ps;
                while (num) {
                        cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
                        if (cur->etag == tag && cur->ps == rr_ps)
                                cur->etag = INVALID_TI_TAG;
                        curadr += size;
                        num--;
                }
        }
}
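
/*
 * Worked example, values hypothetical: purging a 64K range (ps == 16)
 * from a region whose psbits records 4K (12) and 16K (14) pages walks
 * 1UL << (16 - 12) == 16 buckets at 4K stride and then
 * 1UL << (16 - 14) == 4 buckets at 16K stride, invalidating every
 * matching tag along the way.
 */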
/*
 * purge VHPT and machine TLB
 */
static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
        struct thash_data *cur;
        u64 start, size, tag, num;
        union ia64_rr rr;

        start = va & ~((1UL << ps) - 1);
        rr.val = ia64_get_rr(va);
        size = PSIZE(rr.ps);
        num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
        while (num) {
                cur = (struct thash_data *)ia64_thash(start);
                tag = ia64_ttag(start);
                if (cur->etag == tag)
                        cur->etag = INVALID_TI_TAG;
                start += size;
                num--;
        }
        machine_tlb_purge(va, ps);
}
/*
 * Insert an entry into hash TLB or VHPT.
 * NOTES:
 *  1: When inserting VHPT to thash, "va" must be an address covered by
 *     the inserted machine VHPT entry.
 *  2: The entry is always in TLB format.
 *  3: The caller needs to make sure the new entry will not overlap
 *     with any existing entry.
 */
void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va)
{
        struct thash_data *head;
        union ia64_rr vrr;
        u64 tag;
        struct thash_cb *hcb = &v->arch.vtlb;

        vrr.val = vcpu_get_rr(v, va);
        vrr.ps = itir_ps(itir);
        VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
        head = vsa_thash(hcb->pta, va, vrr.val, &tag);
        head->page_flags = pte;
        head->itir = itir;
        head->etag = tag;
}
int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type)
{
        struct thash_data *trp;
        int i;
        u64 end, rid;

        rid = vcpu_get_rr(vcpu, va);
        rid = rid & RR_RID_MASK;
        end = va + PSIZE(ps);
        if (type == D_TLB) {
                if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
                                        i < NDTRS; i++, trp++) {
                                if (__is_tr_overlap(trp, rid, va, end))
                                        return i;
                        }
                }
        } else {
                if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
                                        i < NITRS; i++, trp++) {
                                if (__is_tr_overlap(trp, rid, va, end))
                                        return i;
                        }
                }
        }
        return -1;
}
/*
 * Purge entries in VTLB and VHPT
 */
void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
{
        if (vcpu_quick_region_check(v->arch.tc_regions, va))
                vtlb_purge(v, va, ps);
        vhpt_purge(v, va, ps);
}

void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
{
        u64 old_va = va;

        va = REGION_OFFSET(va);
        if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
                vtlb_purge(v, va, ps);
        vhpt_purge(v, va, ps);
}
u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
{
        u64 ps, ps_mask, paddr, maddr, io_mask;
        union pte_flags phy_pte;

        ps = itir_ps(itir);
        ps_mask = ~((1UL << ps) - 1);
        phy_pte.val = *pte;
        paddr = *pte;
        paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
        maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
        io_mask = maddr & GPFN_IO_MASK;
        if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
                *pte |= VTLB_PTE_IO;
                return -1;
        }
        maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
                                (paddr & ~PAGE_MASK);
        phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
        return phy_pte.val;
}
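
/*
 * Illustrative sketch, not part of the build: how a caller typically
 * consumes translate_phy_pte().  The itir value (16K page) is an
 * assumption; the -1/VTLB_PTE_IO convention is the one used above.
 */
#if 0
static int example_translate(u64 gpte, u64 va)
{
        u64 itir = 14UL << 2;           /* hypothetical 16K page */
        u64 mpte = translate_phy_pte(&gpte, itir, va);

        if (mpte == (u64)-1)            /* MMIO: gpte now carries VTLB_PTE_IO */
                return 1;
        /* mpte is a machine pte, ready for ia64_itc()/vhpt_insert() */
        return 0;
}
#endif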
/*
 * Purge overlapping TCs and then insert the new entry to emulate itc ops.
 * Note: only TC entries can be purged and inserted here.
 */
void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
                                                u64 ifa, int type)
{
        u64 ps;
        u64 phy_pte, io_mask, index;
        union ia64_rr vrr, mrr;

        ps = itir_ps(itir);
        vrr.val = vcpu_get_rr(v, ifa);
        mrr.val = ia64_get_rr(ifa);

        index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
        io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
        phy_pte = translate_phy_pte(&pte, itir, ifa);

        /* Ensure WB attribute if pte is related to a normal mem page,
         * which is required by vga acceleration since qemu maps shared
         * vram buffer with WB.
         */
        if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
                        io_mask != GPFN_PHYS_MMIO) {
                pte &= ~_PAGE_MA_MASK;
                phy_pte &= ~_PAGE_MA_MASK;
        }

        vtlb_purge(v, ifa, ps);
        vhpt_purge(v, ifa, ps);

        if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
                vtlb_insert(v, pte, itir, ifa);
                vcpu_quick_region_set(VMX(v, tc_regions), ifa);
        }
        if (pte & VTLB_PTE_IO)
                return;

        if (ps >= mrr.ps)
                vhpt_insert(phy_pte, itir, ifa, pte);
        else {
                u64 psr;

                phy_pte &= ~PAGE_FLAGS_RV_MASK;
                psr = ia64_clear_ic();
                ia64_itc(type, ifa, phy_pte, ps);
                paravirt_dv_serialize_data();
                ia64_set_psr(psr);
        }
        if (!(pte & VTLB_PTE_IO))
                mark_pages_dirty(v, pte, ps);
}
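
/*
 * Usage sketch, not part of the build: emulating a guest itc.d comes down
 * to one call; guest_pte, guest_itir and guest_ifa stand for the operands
 * decoded from the intercepted instruction and are hypothetical here.
 */
#if 0
static void example_emulate_itc_d(struct kvm_vcpu *vcpu,
                                  u64 guest_pte, u64 guest_itir, u64 guest_ifa)
{
        thash_purge_and_insert(vcpu, guest_pte, guest_itir, guest_ifa, D_TLB);
}
#endif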
/*
 * Purge all TCs or VHPT entries including those in the hash table.
 */
void thash_purge_all(struct kvm_vcpu *v)
{
        int i;
        struct thash_data *head;
        struct thash_cb *vtlb, *vhpt;

        vtlb = &v->arch.vtlb;
        vhpt = &v->arch.vhpt;

        for (i = 0; i < 8; i++)
                VMX(v, psbits[i]) = 0;

        head = vtlb->hash;
        for (i = 0; i < vtlb->num; i++) {
                head->page_flags = 0;
                head->etag = INVALID_TI_TAG;
                head->itir = 0;
                head->next = 0;
                head++;
        }

        head = vhpt->hash;
        for (i = 0; i < vhpt->num; i++) {
                head->page_flags = 0;
                head->etag = INVALID_TI_TAG;
                head->itir = 0;
                head->next = 0;
                head++;
        }

        local_flush_tlb_all();
}
/*
 * Lookup the hash table and its collision chain to find an entry
 * covering this address rid:va.
 *
 * INPUT:
 *  va: in TLB format for both VHPT & TLB.
 */
struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
{
        struct thash_data *cch;
        u64 psbits, ps, tag;
        union ia64_rr vrr;
        struct thash_cb *hcb = &v->arch.vtlb;

        cch = __vtr_lookup(v, va, is_data);
        if (cch)
                return cch;

        if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
                return NULL;

        psbits = VMX(v, psbits[(va >> 61)]);
        vrr.val = vcpu_get_rr(v, va);
        while (psbits) {
                ps = __ffs(psbits);
                psbits &= ~(1UL << ps);
                vrr.ps = ps;
                cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
                if (cch->etag == tag && cch->ps == ps)
                        return cch;
        }

        return NULL;
}
/*
 * Initialize internal control data before service.
 */
void thash_init(struct thash_cb *hcb, u64 sz)
{
        int i;
        struct thash_data *head;

        hcb->pta.val = (unsigned long)hcb->hash;
        hcb->pta.vf = 1;
        hcb->pta.ve = 1;
        hcb->pta.size = sz;
        head = hcb->hash;
        for (i = 0; i < hcb->num; i++) {
                head->page_flags = 0;
                head->itir = 0;
                head->etag = INVALID_TI_TAG;
                head->next = 0;
                head++;
        }
}
u64 kvm_get_mpt_entry(u64 gpfn)
{
        u64 *base = (u64 *)KVM_P2M_BASE;

        if (gpfn >= (KVM_P2M_SIZE >> 3))
                panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);

        return *(base + gpfn);
}

u64 kvm_lookup_mpa(u64 gpfn)
{
        u64 maddr;

        maddr = kvm_get_mpt_entry(gpfn);
        return maddr & _PAGE_PPN_MASK;
}

u64 kvm_gpa_to_mpa(u64 gpa)
{
        u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);

        return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
}
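
/*
 * Worked example, values hypothetical: with 16K pages (PAGE_SHIFT == 14)
 * a guest physical address 0x12345678 indexes p2m slot 0x48d1
 * (gpa >> PAGE_SHIFT); if that slot maps to machine frame 0xabcd,
 * kvm_gpa_to_mpa() returns (0xabcd << 14) | 0x1678.
 */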
/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return fetched bundle.
 */
int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
        u64 gpip = 0;   /* guest physical IP */
        u64 *vpa;
        struct thash_data *tlb;
        u64 maddr;

        if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
                /* I-side physical mode */
                gpip = gip;
        } else {
                tlb = vtlb_lookup(vcpu, gip, I_TLB);
                if (tlb)
                        gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
                                (gip & (PSIZE(tlb->ps) - 1));
        }
        if (gpip) {
                maddr = kvm_gpa_to_mpa(gpip);
        } else {
                tlb = vhpt_lookup(gip);
                if (tlb == NULL) {
                        ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
                        return IA64_FAULT;
                }
                maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
                        | (gip & (PSIZE(tlb->ps) - 1));
        }
        vpa = (u64 *)__kvm_va(maddr);

        pbundle->i64[0] = *vpa++;
        pbundle->i64[1] = *vpa;

        return IA64_NO_FAULT;
}
void kvm_init_vhpt(struct kvm_vcpu *v)
{
        v->arch.vhpt.num = VHPT_NUM_ENTRIES;
        thash_init(&v->arch.vhpt, VHPT_SHIFT);
        ia64_set_pta(v->arch.vhpt.pta.val);
        /* Enable VHPT here? */
}

void kvm_init_vtlb(struct kvm_vcpu *v)
{
        v->arch.vtlb.num = VTLB_NUM_ENTRIES;
        thash_init(&v->arch.vtlb, VTLB_SHIFT);
}
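
/*
 * Usage sketch, not part of the build: vcpu setup is expected to size and
 * initialize both software structures before the guest runs, roughly in
 * this order (the helper name is hypothetical).
 */
#if 0
static void example_init_guest_tlb(struct kvm_vcpu *vcpu)
{
        kvm_init_vtlb(vcpu);
        kvm_init_vhpt(vcpu);    /* also loads the machine PTA register */
}
#endif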