/*
 * vtlb.c: guest virtual tlb handling module.
 * Copyright (c) 2004, Intel Corporation.
 *  Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *
 * Copyright (c) 2007, Intel Corporation.
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *  Xiantao Zhang <xiantao.zhang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include "vcpu.h"

#include <linux/rwsem.h>

#include <asm/tlb.h>

/*
 * Check to see if the address rid:va is translated by the TLB
 */
static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)
{
        return ((trp->p) && (trp->rid == rid)
                        && ((va - trp->vadr) < PSIZE(trp->ps)));
}

/*
 * Only for GUEST TR format.
 */
static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva)
{
        u64 sa1, ea1;

        if (!trp->p || trp->rid != rid)
                return 0;

        sa1 = trp->vadr;
        ea1 = sa1 + PSIZE(trp->ps) - 1;
        eva -= 1;
        if ((sva > ea1) || (sa1 > eva))
                return 0;
        else
                return 1;
}
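
/* Purge the machine TLB translation covering va at page size ps (ptc.l). */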
void machine_tlb_purge(u64 va, u64 ps)
{
        ia64_ptcl(va, ps << 2);
}
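
/*
 * Flush the entire local machine TLB, walking the ptc.e loop parameters
 * (base, counts and strides) cached in the current vcpu.
 */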
void local_flush_tlb_all(void)
{
        int i, j;
        unsigned long flags, count0, count1;
        unsigned long stride0, stride1, addr;

        addr    = current_vcpu->arch.ptce_base;
        count0  = current_vcpu->arch.ptce_count[0];
        count1  = current_vcpu->arch.ptce_count[1];
        stride0 = current_vcpu->arch.ptce_stride[0];
        stride1 = current_vcpu->arch.ptce_stride[1];

        local_irq_save(flags);
        for (i = 0; i < count0; ++i) {
                for (j = 0; j < count1; ++j) {
                        ia64_ptce(addr);
                        addr += stride1;
                }
                addr += stride0;
        }
        local_irq_restore(flags);
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}
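
/*
 * Report whether the guest VHPT walker is enabled for this reference to
 * vadr: both the region's ve bit and the guest PTA ve bit must be set,
 * plus the psr translation bits relevant to the reference type.
 */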
int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref)
{
        union ia64_rr vrr;
        union ia64_pta vpta;
        struct ia64_psr vpsr;

        vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
        vrr.val = vcpu_get_rr(vcpu, vadr);
        vpta.val = vcpu_get_pta(vcpu);

        if (vrr.ve & vpta.ve) {
                switch (ref) {
                case DATA_REF:
                case NA_REF:
                        return vpsr.dt;
                case INST_REF:
                        return vpsr.dt && vpsr.it && vpsr.ic;
                case RSE_REF:
                        return vpsr.dt && vpsr.rt;
                }
        }
        return 0;
}
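
/*
 * Hash a guest address into its long-format VHPT bucket: the index mixes
 * the low 8 RID bits with the region page number, *tag receives the
 * matching tag (remaining RID and vpn bits), and the bucket address is
 * pta.base plus index * 32 (one 32-byte thash_data per bucket).
 */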
struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)
{
        u64 index, pfn, rid, pfn_bits;

        pfn_bits = vpta.size - 5 - 8;
        pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
        rid = _REGION_ID(vrr);
        index = ((rid & 0xff) << pfn_bits) | (pfn & ((1UL << pfn_bits) - 1));
        *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);

        return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) +
                        (index << 5));
}
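
/*
 * Scan the guest DTRs or ITRs (selected by type) for a TR entry that
 * translates rid:va; returns NULL when no TR covers the address.
 */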
struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
{
        struct thash_data *trp;
        int i;
        u64 rid;

        rid = vcpu_get_rr(vcpu, va);
        rid = rid & RR_RID_MASK;
        if (type == D_TLB) {
                if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
                                        i < NDTRS; i++, trp++) {
                                if (__is_tr_translated(trp, rid, va))
                                        return trp;
                        }
                }
        } else {
                if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
                                        i < NITRS; i++, trp++) {
                                if (__is_tr_translated(trp, rid, va))
                                        return trp;
                        }
                }
        }

        return NULL;
}
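
/*
 * Fill the machine VHPT entry for ifa with the given machine pte,
 * remembering the guest physical address the mapping backs.  The etag is
 * invalidated around the update so the hardware walker never sees a
 * partially written entry.
 */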
static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
{
        union ia64_rr rr;
        struct thash_data *head;
        unsigned long ps, gpaddr;

        ps = itir_ps(itir);
        gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
                        (ifa & ((1UL << ps) - 1));

        rr.val = ia64_get_rr(ifa);
        head = (struct thash_data *)ia64_thash(ifa);
        head->etag = INVALID_TI_TAG;
        ia64_mf();
        head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
        head->itir = rr.ps << 2;
        head->etag = ia64_ttag(ifa);
        head->gpaddr = gpaddr;
}
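
/* Set the dirty-log bits for every guest page frame covered by pte at size ps. */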
void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
{
        u64 i, dirty_pages = 1;
        u64 base_gfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
        spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
        void *dirty_bitmap = (void *)v - (KVM_VCPU_OFS + v->vcpu_id * VCPU_SIZE)
                                + KVM_MEM_DIRTY_LOG_OFS;

        dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;

        vmm_spin_lock(lock);
        for (i = 0; i < dirty_pages; i++) {
                /* avoid RMW */
                if (!test_bit(base_gfn + i, dirty_bitmap))
                        set_bit(base_gfn + i, dirty_bitmap);
        }
        vmm_spin_unlock(lock);
}
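
/*
 * Insert a guest translation on the machine side: into the VHPT when the
 * mapping is at least as large as the machine page size of the region,
 * otherwise directly into the machine TLB via itc.
 */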
void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
{
        u64 phy_pte, psr;
        union ia64_rr mrr;

        mrr.val = ia64_get_rr(va);
        phy_pte = translate_phy_pte(&pte, itir, va);

        if (itir_ps(itir) >= mrr.ps) {
                vhpt_insert(phy_pte, itir, va, pte);
        } else {
                phy_pte &= ~PAGE_FLAGS_RV_MASK;
                psr = ia64_clear_ic();
                ia64_itc(type, va, phy_pte, itir_ps(itir));
                ia64_set_psr(psr);
        }

        if (!(pte & VTLB_PTE_IO))
                mark_pages_dirty(v, pte, itir_ps(itir));
}

/*
 * vhpt lookup
 */
struct thash_data *vhpt_lookup(u64 va)
{
        struct thash_data *head;
        u64 tag;

        head = (struct thash_data *)ia64_thash(va);
        tag = ia64_ttag(va);
        if (head->etag == tag)
                return head;
        return NULL;
}
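
/*
 * Read the guest VHPT entry at iha.  If a guest DTR covers iha, its
 * mapping is inserted first so the access can succeed.  The entry is then
 * fetched with a speculative load: returns 1 if the load takes a NaT
 * (entry not accessible), otherwise stores the entry's low 53 bits in
 * *pte and returns 0.
 */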
u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
        u64 ret;
        struct thash_data *data;

        data = __vtr_lookup(current_vcpu, iha, D_TLB);
        if (data != NULL)
                thash_vhpt_insert(current_vcpu, data->page_flags,
                        data->itir, iha, D_TLB);

        asm volatile ("rsm psr.ic|psr.i;;"
                        "srlz.d;;"
                        "ld8.s r9=[%1];;"
                        "tnat.nz p6,p7=r9;;"
                        "(p6) mov %0=1;"
                        "(p6) mov r9=r0;"
                        "(p7) extr.u r9=r9,0,53;;"
                        "(p7) mov %0=r0;"
                        "(p7) st8 [%2]=r9;;"
                        "ssm psr.ic;;"
                        "srlz.d;;"
                        /* "ssm psr.i;;" Once interrupts in vmm open, need fix */
                        : "=r"(ret) : "r"(iha), "r"(pte) : "memory");

        return ret;
}

/*
 * purge software guest tlb
 */
static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
        struct thash_data *cur;
        u64 start, curadr, size, psbits, tag, rr_ps, num;
        union ia64_rr vrr;
        struct thash_cb *hcb = &v->arch.vtlb;

        vrr.val = vcpu_get_rr(v, va);
        psbits = VMX(v, psbits[(va >> 61)]);
        start = va & ~((1UL << ps) - 1);
        while (psbits) {
                curadr = start;
                rr_ps = __ffs(psbits);
                psbits &= ~(1UL << rr_ps);
                num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
                size = PSIZE(rr_ps);
                vrr.ps = rr_ps;
                while (num) {
                        cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
                        if (cur->etag == tag && cur->ps == rr_ps)
                                cur->etag = INVALID_TI_TAG;
                        curadr += size;
                        num--;
                }
        }
}

/*
 * purge VHPT and machine TLB
 */
static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
        struct thash_data *cur;
        u64 start, size, tag, num;
        union ia64_rr rr;

        start = va & ~((1UL << ps) - 1);
        rr.val = ia64_get_rr(va);
        size = PSIZE(rr.ps);
        num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
        while (num) {
                cur = (struct thash_data *)ia64_thash(start);
                tag = ia64_ttag(start);
                if (cur->etag == tag)
                        cur->etag = INVALID_TI_TAG;
                start += size;
                num--;
        }
        machine_tlb_purge(va, ps);
}

/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 *      1: When inserting a VHPT entry into thash, "va" must be an address
 *         covered by the inserted machine VHPT entry.
 *      2: The entry format is always the TLB format.
 *      3: The caller must make sure the new entry will not overlap
 *         with any existing entry.
 */
void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va)
{
        struct thash_data *head;
        union ia64_rr vrr;
        u64 tag;
        struct thash_cb *hcb = &v->arch.vtlb;

        vrr.val = vcpu_get_rr(v, va);
        vrr.ps = itir_ps(itir);
        VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
        head = vsa_thash(hcb->pta, va, vrr.val, &tag);
        head->page_flags = pte;
        head->itir = itir;
        head->etag = tag;
}
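
/*
 * Return the index of the guest DTR/ITR (selected by type) that overlaps
 * the range rid:[va, va + PSIZE(ps)), or -1 if there is no overlap.
 */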
int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type)
{
        struct thash_data *trp;
        int i;
        u64 end, rid;

        rid = vcpu_get_rr(vcpu, va);
        rid = rid & RR_RID_MASK;
        end = va + PSIZE(ps);
        if (type == D_TLB) {
                if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
                                        i < NDTRS; i++, trp++) {
                                if (__is_tr_overlap(trp, rid, va, end))
                                        return i;
                        }
                }
        } else {
                if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
                                        i < NITRS; i++, trp++) {
                                if (__is_tr_overlap(trp, rid, va, end))
                                        return i;
                        }
                }
        }
        return -1;
}

/*
 * Purge entries in VTLB and VHPT
 */
void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
{
        if (vcpu_quick_region_check(v->arch.tc_regions, va))
                vtlb_purge(v, va, ps);
        vhpt_purge(v, va, ps);
}
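
/*
 * Remote-purge variant: the purge itself runs on the region offset of va,
 * while the quick region check still uses the original address.
 */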
void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
{
        u64 old_va = va;

        va = REGION_OFFSET(va);
        if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
                vtlb_purge(v, va, ps);
        vhpt_purge(v, va, ps);
}
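
/*
 * Translate a guest pte into a machine pte by running the guest physical
 * address through the p2m table.  If the gpfn maps to I/O space, *pte is
 * flagged with VTLB_PTE_IO and -1 is returned.
 */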
u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
{
        u64 ps, ps_mask, paddr, maddr;
        union pte_flags phy_pte;

        ps = itir_ps(itir);
        ps_mask = ~((1UL << ps) - 1);
        phy_pte.val = *pte;
        paddr = *pte;
        paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
        maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
        if (maddr & GPFN_IO_MASK) {
                *pte |= VTLB_PTE_IO;
                return -1;
        }
        maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
                                (paddr & ~PAGE_MASK);
        phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
        return phy_pte.val;
}

/*
 * Purge overlapping TCs and then insert the new entry to emulate itc ops.
 * Notes: only TC entries can be purged and inserted this way.
 * A return value of 1 indicates the mapping is MMIO.
 */
int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
                                                u64 ifa, int type)
{
        u64 ps;
        u64 phy_pte;
        union ia64_rr vrr, mrr;
        int ret = 0;

        ps = itir_ps(itir);
        vrr.val = vcpu_get_rr(v, ifa);
        mrr.val = ia64_get_rr(ifa);

        phy_pte = translate_phy_pte(&pte, itir, ifa);

        /* Ensure WB attribute if pte is related to a normal mem page,
         * which is required by vga acceleration since qemu maps shared
         * vram buffer with WB.
         */
        if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
                pte &= ~_PAGE_MA_MASK;
                phy_pte &= ~_PAGE_MA_MASK;
        }

        if (pte & VTLB_PTE_IO)
                ret = 1;

        vtlb_purge(v, ifa, ps);
        vhpt_purge(v, ifa, ps);

        if (ps == mrr.ps) {
                if (!(pte & VTLB_PTE_IO)) {
                        vhpt_insert(phy_pte, itir, ifa, pte);
                } else {
                        vtlb_insert(v, pte, itir, ifa);
                        vcpu_quick_region_set(VMX(v, tc_regions), ifa);
                }
        } else if (ps > mrr.ps) {
                vtlb_insert(v, pte, itir, ifa);
                vcpu_quick_region_set(VMX(v, tc_regions), ifa);
                if (!(pte & VTLB_PTE_IO))
                        vhpt_insert(phy_pte, itir, ifa, pte);
        } else {
                u64 psr;

                phy_pte &= ~PAGE_FLAGS_RV_MASK;
                psr = ia64_clear_ic();
                ia64_itc(type, ifa, phy_pte, ps);
                ia64_set_psr(psr);
        }
        if (!(pte & VTLB_PTE_IO))
                mark_pages_dirty(v, pte, ps);

        return ret;
}

/*
 * Purge all TC and VHPT entries, including those in the hash tables.
 */
void thash_purge_all(struct kvm_vcpu *v)
{
        int i;
        struct thash_data *head;
        struct thash_cb *vtlb, *vhpt;

        vtlb = &v->arch.vtlb;
        vhpt = &v->arch.vhpt;

        for (i = 0; i < 8; i++)
                VMX(v, psbits[i]) = 0;

        head = vtlb->hash;
        for (i = 0; i < vtlb->num; i++) {
                head->page_flags = 0;
                head->etag = INVALID_TI_TAG;
                head->itir = 0;
                head->next = 0;
                head++;
        }

        head = vhpt->hash;
        for (i = 0; i < vhpt->num; i++) {
                head->page_flags = 0;
                head->etag = INVALID_TI_TAG;
                head->itir = 0;
                head->next = 0;
                head++;
        }

        local_flush_tlb_all();
}

/*
 * Look up the hash table and its collision chain to find an entry
 * covering this address rid:va.
 *
 * INPUT:
 *  in: TLB format for both VHPT & TLB.
 */
struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
{
        struct thash_data *cch;
        u64 psbits, ps, tag;
        union ia64_rr vrr;
        struct thash_cb *hcb = &v->arch.vtlb;

        cch = __vtr_lookup(v, va, is_data);
        if (cch)
                return cch;

        if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
                return NULL;

        psbits = VMX(v, psbits[(va >> 61)]);
        vrr.val = vcpu_get_rr(v, va);
        while (psbits) {
                ps = __ffs(psbits);
                psbits &= ~(1UL << ps);
                vrr.ps = ps;
                cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
                if (cch->etag == tag && cch->ps == ps)
                        return cch;
        }

        return NULL;
}

/*
 * Initialize internal control data before service.
 */
void thash_init(struct thash_cb *hcb, u64 sz)
{
        int i;
        struct thash_data *head;

        hcb->pta.val = (unsigned long)hcb->hash;
        hcb->pta.vf = 1;
        hcb->pta.ve = 1;
        hcb->pta.size = sz;
        head = hcb->hash;
        for (i = 0; i < hcb->num; i++) {
                head->page_flags = 0;
                head->itir = 0;
                head->etag = INVALID_TI_TAG;
                head->next = 0;
                head++;
        }
}
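
/*
 * p2m helpers: look up the machine entry for a guest pfn in the table at
 * KVM_P2M_BASE, and convert a guest physical address to a machine
 * physical address.
 */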
u64 kvm_lookup_mpa(u64 gpfn)
{
        u64 *base = (u64 *)KVM_P2M_BASE;

        return *(base + gpfn);
}

u64 kvm_gpa_to_mpa(u64 gpa)
{
        u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);

        return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
}

/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return fetched bundle.
 */
int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
        u64 gpip = 0;   /* guest physical IP */
        u64 *vpa;
        struct thash_data *tlb;
        u64 maddr;

        if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
                /* I-side physical mode */
                gpip = gip;
        } else {
                tlb = vtlb_lookup(vcpu, gip, I_TLB);
                if (tlb)
                        gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
                                (gip & (PSIZE(tlb->ps) - 1));
        }
        if (gpip) {
                maddr = kvm_gpa_to_mpa(gpip);
        } else {
                tlb = vhpt_lookup(gip);
                if (tlb == NULL) {
                        ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
                        return IA64_FAULT;
                }
                maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
                        | (gip & (PSIZE(tlb->ps) - 1));
        }
        vpa = (u64 *)__kvm_va(maddr);

        pbundle->i64[0] = *vpa++;
        pbundle->i64[1] = *vpa;

        return IA64_NO_FAULT;
}
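
/*
 * Per-vcpu setup: size the VHPT and VTLB hashes and, for the VHPT, load
 * its PTA into hardware; the VTLB hash is only walked by software here.
 */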
void kvm_init_vhpt(struct kvm_vcpu *v)
{
        v->arch.vhpt.num = VHPT_NUM_ENTRIES;
        thash_init(&v->arch.vhpt, VHPT_SHIFT);
        ia64_set_pta(v->arch.vhpt.pta.val);
        /* Enable VHPT here? */
}

void kvm_init_vtlb(struct kvm_vcpu *v)
{
        v->arch.vtlb.num = VTLB_NUM_ENTRIES;
        thash_init(&v->arch.vtlb, VTLB_SHIFT);
}