/*
 * kvm_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Shaofan Li (Susue Li) <susie.li@intel.com>
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Xiantao Zhang <xiantao.zhang@intel.com>
 */

#include <linux/kvm_host.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/ia64regs.h>
#include <asm/gcc_intrin.h>
#include <asm/kregs.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include "asm-offsets.h"
#include "vcpu.h"

/*
 * Special notes:
 * - Index by it/dt/rt sequence
 * - Only existing mode transitions are allowed in this table
 * - RSE is placed at lazy mode when emulating guest partial mode
 * - If gva happens to be rr0 and rr4, only allowed case is identity
 *   mapping (gva=gpa), or panic! (How?)
 */
int mm_switch_table[8][8] = {
	/* 2004/09/12(Kevin): Allow switch to self */
	/*
	 * (it,dt,rt): (0,0,0) -> (1,1,1)
	 * This kind of transition usually occurs in the very early
	 * stage of Linux boot up procedure. Another case is in efi
	 * and pal calls. (see "arch/ia64/kernel/head.S")
	 *
	 * (it,dt,rt): (0,0,0) -> (0,1,1)
	 * This kind of transition is found when OSYa exits efi boot
	 * service. Due to gva = gpa in this case (Same region),
	 * data access can be satisfied though itlb entry for physical
	 * emulation is hit.
	 */
	{SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
	{0, 0, 0, 0, 0, 0, 0, 0},
	{0, 0, 0, 0, 0, 0, 0, 0},
	/*
	 * (it,dt,rt): (0,1,1) -> (1,1,1)
	 * This kind of transition is found in OSYa.
	 *
	 * (it,dt,rt): (0,1,1) -> (0,0,0)
	 * This kind of transition is found in OSYa
	 */
	{SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
	/* (1,0,0)->(1,1,1) */
	{0, 0, 0, 0, 0, 0, 0, SW_P2V},
	/*
	 * (it,dt,rt): (1,0,1) -> (1,1,1)
	 * This kind of transition usually occurs when Linux returns
	 * from the low level TLB miss handlers.
	 * (see "arch/ia64/kernel/ivt.S")
	 */
	{0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
	{0, 0, 0, 0, 0, 0, 0, 0},
	/*
	 * (it,dt,rt): (1,1,1) -> (1,0,1)
	 * This kind of transition usually occurs in Linux low level
	 * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
	 *
	 * (it,dt,rt): (1,1,1) -> (0,0,0)
	 * This kind of transition usually occurs in pal and efi calls,
	 * which requires running in physical mode.
	 * (see "arch/ia64/kernel/head.S")
	 * (1,1,1)->(1,0,0)
	 */
	{SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
};
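
/*
 * Illustrative example: MODE_IND() collapses a PSR's it/dt/rt bits into a
 * 3-bit index, so the table is read as
 * mm_switch_table[MODE_IND(old_psr)][MODE_IND(new_psr)].  A guest leaving
 * fully virtual mode (it,dt,rt) = (1,1,1) for fully physical mode (0,0,0)
 * therefore hits row 7, column 0, which yields SW_V2P.
 */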

void physical_mode_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mode_flags = GUEST_IN_PHY;
}

void switch_to_physical_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	/* Save original virtual mode rr[0] and rr[4] */
	psr = ia64_clear_ic();
	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_rr4);
	ia64_srlz_d();

	ia64_set_psr(psr);
	return;
}

void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	psr = ia64_clear_ic();
	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
	ia64_srlz_d();
	ia64_set_psr(psr);
	return;
}

static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
{
	return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}

void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
					struct ia64_psr new_psr)
{
	int act;

	act = mm_switch_action(old_psr, new_psr);
	switch (act) {
	case SW_V2P:
		/*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
			old_psr.val, new_psr.val);*/
		switch_to_physical_rid(vcpu);
		/*
		 * Set RSE to enforced lazy mode to prevent active RSE
		 * save/restore while in guest physical mode.
		 */
		vcpu->arch.mode_flags |= GUEST_IN_PHY;
		break;
	case SW_P2V:
		switch_to_virtual_rid(vcpu);
		/*
		 * Recover the old mode, which was saved when entering
		 * guest physical mode.
		 */
		vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
		break;
	case SW_SELF:
		break;
	case SW_NOP:
		break;
	default:
		/* Sanity check */
		break;
	}
	return;
}

/*
 * In physical mode, inserting a tc/tr entry for region 0 or 4 uses
 * RID[0] and RID[4], which are reserved for physical mode emulation.
 * However, what those inserted tc/tr entries need is the RID for
 * virtual mode, so the original virtual RID has to be restored
 * before the insert.
 *
 * Operations which require such a switch include:
 * - insertions (itc.*, itr.*)
 * - purges (ptc.* and ptr.*)
 * - tpa
 * - tak
 * - thash?, ttag?
 * All of the above need the actual virtual RID for the destination entry.
 */
void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
					struct ia64_psr new_psr)
{
	if ((old_psr.dt != new_psr.dt)
			|| (old_psr.it != new_psr.it)
			|| (old_psr.rt != new_psr.rt))
		switch_mm_mode(vcpu, old_psr, new_psr);

	return;
}

/*
 * In physical mode, inserting a tc/tr entry for region 0 or 4 uses
 * RID[0] and RID[4], which are reserved for physical mode emulation.
 * However, what those inserted tc/tr entries need is the RID for
 * virtual mode, so the original virtual RID has to be restored
 * before the insert.
 *
 * Operations which require such a switch include:
 * - insertions (itc.*, itr.*)
 * - purges (ptc.* and ptr.*)
 * - tpa
 * - tak
 * - thash?, ttag?
 * All of the above need the actual virtual RID for the destination entry.
 */
void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu)) {
		vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
		switch_to_virtual_rid(vcpu);
	}
	return;
}

/* Recover always follows prepare */
void recover_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu))
		switch_to_physical_rid(vcpu);
	vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
	return;
}

#define RPT(x)	((u16) &((struct kvm_pt_regs *)0)->x)

static u16 gr_info[32] = {
	0, 	/* r0 is read-only : WE SHOULD NEVER GET THIS */
	RPT(r1), RPT(r2), RPT(r3),
	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
};

#define IA64_FIRST_STACKED_GR	32
#define IA64_FIRST_ROTATING_FR	32

static inline unsigned long
rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
{
	reg += rrb;
	if (reg >= sor)
		reg -= sor;
	return reg;
}

/*
 * Return the (rotated) index of the floating point register REGNUM
 * (REGNUM must be in the range 32-127; the result is in the range 0-95).
 */
static inline unsigned long fph_index(struct kvm_pt_regs *regs,
					long regnum)
{
	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
}
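
/*
 * Illustrative example: rotate_reg() applies the register rename base
 * modulo the size of the rotating region.  With sor = 48 and rrb = 10,
 * logical register index 45 maps to 45 + 10 - 48 = 7, while index 20 maps
 * to 30.  fph_index() does the same for the 96 rotating FP registers
 * (f32-f127), using the rrb.fr field taken from cr.ifs bits 25-31.
 */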

/*
 * The inverse of the above: given bspstore and the number of
 * registers, calculate ar.bsp.
 */
static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
							long num_regs)
{
	long delta = ia64_rse_slot_num(addr) + num_regs;
	int i = 0;

	if (num_regs < 0)
		delta -= 0x3e;
	if (delta < 0) {
		while (delta <= -0x3f) {
			i--;
			delta += 0x3f;
		}
	} else {
		while (delta >= 0x3f) {
			i++;
			delta -= 0x3f;
		}
	}
	return addr + num_regs + i;
}
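
/*
 * Illustrative example: on the RSE backing store every 64th doubleword
 * (slot 63 of each 64-slot group) holds a NaT collection instead of a
 * stacked register, which is why the loops above count in units of 0x3f.
 * Skipping 70 registers forward from a slot-0 address therefore advances
 * 70 + 1 doublewords, the extra one stepping over the NaT collection
 * crossed on the way.
 */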

static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
					unsigned long *val, int *nat)
{
	unsigned long *bsp, *addr, *rnat_addr, *bspstore;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	new_rsc = old_rsc & (~(0x3));
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);

	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	bsp = kbs + (regs->loadrs >> 19);

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	if (addr >= bspstore) {
		ia64_flushrs();
		ia64_mf();
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	}
	*val = *addr;
	if (nat) {
		if (bspstore < rnat_addr)
			*nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
							& nat_mask);
		else
			*nat = (int)!!((*rnat_addr) & nat_mask);
		ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
	}
}

void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
				unsigned long val, unsigned long nat)
{
	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc, psr;
	unsigned long rnat;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	/* put RSC to lazy mode, and set loadrs 0 */
	new_rsc = old_rsc & (~0x3fff0003);
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
	bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	local_irq_save(psr);
	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	if (addr >= bspstore) {
		ia64_flushrs();
		ia64_mf();
		*addr = val;
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
		else
			*rnat_addr = (*rnat_addr) & (~nat_mask);

		ia64_mf();
		ia64_loadrs();
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	} else {
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		*addr = val;
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
		else
			*rnat_addr = (*rnat_addr) & (~nat_mask);

		ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	}
	local_irq_restore(psr);
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}

void getreg(unsigned long regnum, unsigned long *val,
				int *nat, struct kvm_pt_regs *regs)
{
	unsigned long addr, *unat;

	if (regnum >= IA64_FIRST_STACKED_GR) {
		get_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;

	addr += gr_info[regnum];

	*val = *(unsigned long *)addr;
	/*
	 * do it only when requested
	 */
	if (nat)
		*nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
}

void setreg(unsigned long regnum, unsigned long val,
			int nat, struct kvm_pt_regs *regs)
{
	unsigned long addr;
	unsigned long bitmask;
	unsigned long *unat;

	/*
	 * First takes care of stacked registers
	 */
	if (regnum >= IA64_FIRST_STACKED_GR) {
		set_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;
	/*
	 * add offset from base of struct
	 * and do it !
	 */
	addr += gr_info[regnum];

	*(unsigned long *)addr = val;

	/*
	 * We need to set/clear the corresponding UNAT bit to fully emulate
	 * the load: UNAT bit_pos = GR[r3]{8:3}, from EAS-2.4.
	 */
	bitmask = 1UL << ((addr >> 3) & 0x3f);
	if (nat)
		*unat |= bitmask;
	else
		*unat &= ~bitmask;
}
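
/*
 * Illustrative example: the UNAT bit for a spilled register is selected by
 * bits 8:3 of the address it is spilled to, i.e. (addr >> 3) & 0x3f, the
 * same convention st8.spill/ld8.fill use.  If, say, a register slot sits
 * at offset 0x18 from a kvm_pt_regs that happens to be 512-byte aligned,
 * its NaT state lives in bit 3 of eml_unat.
 */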

u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	u64 val;

	if (!reg)
		return 0;
	getreg(reg, &val, 0, regs);
	return val;
}

void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;

	if (!reg)
		return;
	if (reg >= sof + 32)
		return;
	setreg(reg, value, nat, regs);	/* FIXME: handle NATs later*/
}

void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
				struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration*/
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case (reg) :				\
		ia64_stf_spill(fpval, reg);	\
		break

	switch (regnum) {
		CASE_FIXED_FP(0);
		CASE_FIXED_FP(1);
		CASE_FIXED_FP(2);
		CASE_FIXED_FP(3);
		CASE_FIXED_FP(4);
		CASE_FIXED_FP(5);
		CASE_FIXED_FP(6);
		CASE_FIXED_FP(7);
		CASE_FIXED_FP(8);
		CASE_FIXED_FP(9);
		CASE_FIXED_FP(10);
		CASE_FIXED_FP(11);
		CASE_FIXED_FP(12);
		CASE_FIXED_FP(13);
		CASE_FIXED_FP(14);
		CASE_FIXED_FP(15);
		CASE_FIXED_FP(16);
		CASE_FIXED_FP(17);
		CASE_FIXED_FP(18);
		CASE_FIXED_FP(19);
		CASE_FIXED_FP(20);
		CASE_FIXED_FP(21);
		CASE_FIXED_FP(22);
		CASE_FIXED_FP(23);
		CASE_FIXED_FP(24);
		CASE_FIXED_FP(25);
		CASE_FIXED_FP(26);
		CASE_FIXED_FP(27);
		CASE_FIXED_FP(28);
		CASE_FIXED_FP(29);
		CASE_FIXED_FP(30);
		CASE_FIXED_FP(31);
		CASE_FIXED_FP(32);
		CASE_FIXED_FP(33);
		CASE_FIXED_FP(34);
		CASE_FIXED_FP(35);
		CASE_FIXED_FP(36);
		CASE_FIXED_FP(37);
		CASE_FIXED_FP(38);
		CASE_FIXED_FP(39);
		CASE_FIXED_FP(40);
		CASE_FIXED_FP(41);
		CASE_FIXED_FP(42);
		CASE_FIXED_FP(43);
		CASE_FIXED_FP(44);
		CASE_FIXED_FP(45);
		CASE_FIXED_FP(46);
		CASE_FIXED_FP(47);
		CASE_FIXED_FP(48);
		CASE_FIXED_FP(49);
		CASE_FIXED_FP(50);
		CASE_FIXED_FP(51);
		CASE_FIXED_FP(52);
		CASE_FIXED_FP(53);
		CASE_FIXED_FP(54);
		CASE_FIXED_FP(55);
		CASE_FIXED_FP(56);
		CASE_FIXED_FP(57);
		CASE_FIXED_FP(58);
		CASE_FIXED_FP(59);
		CASE_FIXED_FP(60);
		CASE_FIXED_FP(61);
		CASE_FIXED_FP(62);
		CASE_FIXED_FP(63);
		CASE_FIXED_FP(64);
		CASE_FIXED_FP(65);
		CASE_FIXED_FP(66);
		CASE_FIXED_FP(67);
		CASE_FIXED_FP(68);
		CASE_FIXED_FP(69);
		CASE_FIXED_FP(70);
		CASE_FIXED_FP(71);
		CASE_FIXED_FP(72);
		CASE_FIXED_FP(73);
		CASE_FIXED_FP(74);
		CASE_FIXED_FP(75);
		CASE_FIXED_FP(76);
		CASE_FIXED_FP(77);
		CASE_FIXED_FP(78);
		CASE_FIXED_FP(79);
		CASE_FIXED_FP(80);
		CASE_FIXED_FP(81);
		CASE_FIXED_FP(82);
		CASE_FIXED_FP(83);
		CASE_FIXED_FP(84);
		CASE_FIXED_FP(85);
		CASE_FIXED_FP(86);
		CASE_FIXED_FP(87);
		CASE_FIXED_FP(88);
		CASE_FIXED_FP(89);
		CASE_FIXED_FP(90);
		CASE_FIXED_FP(91);
		CASE_FIXED_FP(92);
		CASE_FIXED_FP(93);
		CASE_FIXED_FP(94);
		CASE_FIXED_FP(95);
		CASE_FIXED_FP(96);
		CASE_FIXED_FP(97);
		CASE_FIXED_FP(98);
		CASE_FIXED_FP(99);
		CASE_FIXED_FP(100);
		CASE_FIXED_FP(101);
		CASE_FIXED_FP(102);
		CASE_FIXED_FP(103);
		CASE_FIXED_FP(104);
		CASE_FIXED_FP(105);
		CASE_FIXED_FP(106);
		CASE_FIXED_FP(107);
		CASE_FIXED_FP(108);
		CASE_FIXED_FP(109);
		CASE_FIXED_FP(110);
		CASE_FIXED_FP(111);
		CASE_FIXED_FP(112);
		CASE_FIXED_FP(113);
		CASE_FIXED_FP(114);
		CASE_FIXED_FP(115);
		CASE_FIXED_FP(116);
		CASE_FIXED_FP(117);
		CASE_FIXED_FP(118);
		CASE_FIXED_FP(119);
		CASE_FIXED_FP(120);
		CASE_FIXED_FP(121);
		CASE_FIXED_FP(122);
		CASE_FIXED_FP(123);
		CASE_FIXED_FP(124);
		CASE_FIXED_FP(125);
		CASE_FIXED_FP(126);
		CASE_FIXED_FP(127);
	}
#undef CASE_FIXED_FP
}

void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
					struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration*/
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case (reg) :				\
		ia64_ldf_fill(reg, fpval);	\
		break

	switch (regnum) {
		CASE_FIXED_FP(2);
		CASE_FIXED_FP(3);
		CASE_FIXED_FP(4);
		CASE_FIXED_FP(5);
		CASE_FIXED_FP(6);
		CASE_FIXED_FP(7);
		CASE_FIXED_FP(8);
		CASE_FIXED_FP(9);
		CASE_FIXED_FP(10);
		CASE_FIXED_FP(11);
		CASE_FIXED_FP(12);
		CASE_FIXED_FP(13);
		CASE_FIXED_FP(14);
		CASE_FIXED_FP(15);
		CASE_FIXED_FP(16);
		CASE_FIXED_FP(17);
		CASE_FIXED_FP(18);
		CASE_FIXED_FP(19);
		CASE_FIXED_FP(20);
		CASE_FIXED_FP(21);
		CASE_FIXED_FP(22);
		CASE_FIXED_FP(23);
		CASE_FIXED_FP(24);
		CASE_FIXED_FP(25);
		CASE_FIXED_FP(26);
		CASE_FIXED_FP(27);
		CASE_FIXED_FP(28);
		CASE_FIXED_FP(29);
		CASE_FIXED_FP(30);
		CASE_FIXED_FP(31);
		CASE_FIXED_FP(32);
		CASE_FIXED_FP(33);
		CASE_FIXED_FP(34);
		CASE_FIXED_FP(35);
		CASE_FIXED_FP(36);
		CASE_FIXED_FP(37);
		CASE_FIXED_FP(38);
		CASE_FIXED_FP(39);
		CASE_FIXED_FP(40);
		CASE_FIXED_FP(41);
		CASE_FIXED_FP(42);
		CASE_FIXED_FP(43);
		CASE_FIXED_FP(44);
		CASE_FIXED_FP(45);
		CASE_FIXED_FP(46);
		CASE_FIXED_FP(47);
		CASE_FIXED_FP(48);
		CASE_FIXED_FP(49);
		CASE_FIXED_FP(50);
		CASE_FIXED_FP(51);
		CASE_FIXED_FP(52);
		CASE_FIXED_FP(53);
		CASE_FIXED_FP(54);
		CASE_FIXED_FP(55);
		CASE_FIXED_FP(56);
		CASE_FIXED_FP(57);
		CASE_FIXED_FP(58);
		CASE_FIXED_FP(59);
		CASE_FIXED_FP(60);
		CASE_FIXED_FP(61);
		CASE_FIXED_FP(62);
		CASE_FIXED_FP(63);
		CASE_FIXED_FP(64);
		CASE_FIXED_FP(65);
		CASE_FIXED_FP(66);
		CASE_FIXED_FP(67);
		CASE_FIXED_FP(68);
		CASE_FIXED_FP(69);
		CASE_FIXED_FP(70);
		CASE_FIXED_FP(71);
		CASE_FIXED_FP(72);
		CASE_FIXED_FP(73);
		CASE_FIXED_FP(74);
		CASE_FIXED_FP(75);
		CASE_FIXED_FP(76);
		CASE_FIXED_FP(77);
		CASE_FIXED_FP(78);
		CASE_FIXED_FP(79);
		CASE_FIXED_FP(80);
		CASE_FIXED_FP(81);
		CASE_FIXED_FP(82);
		CASE_FIXED_FP(83);
		CASE_FIXED_FP(84);
		CASE_FIXED_FP(85);
		CASE_FIXED_FP(86);
		CASE_FIXED_FP(87);
		CASE_FIXED_FP(88);
		CASE_FIXED_FP(89);
		CASE_FIXED_FP(90);
		CASE_FIXED_FP(91);
		CASE_FIXED_FP(92);
		CASE_FIXED_FP(93);
		CASE_FIXED_FP(94);
		CASE_FIXED_FP(95);
		CASE_FIXED_FP(96);
		CASE_FIXED_FP(97);
		CASE_FIXED_FP(98);
		CASE_FIXED_FP(99);
		CASE_FIXED_FP(100);
		CASE_FIXED_FP(101);
		CASE_FIXED_FP(102);
		CASE_FIXED_FP(103);
		CASE_FIXED_FP(104);
		CASE_FIXED_FP(105);
		CASE_FIXED_FP(106);
		CASE_FIXED_FP(107);
		CASE_FIXED_FP(108);
		CASE_FIXED_FP(109);
		CASE_FIXED_FP(110);
		CASE_FIXED_FP(111);
		CASE_FIXED_FP(112);
		CASE_FIXED_FP(113);
		CASE_FIXED_FP(114);
		CASE_FIXED_FP(115);
		CASE_FIXED_FP(116);
		CASE_FIXED_FP(117);
		CASE_FIXED_FP(118);
		CASE_FIXED_FP(119);
		CASE_FIXED_FP(120);
		CASE_FIXED_FP(121);
		CASE_FIXED_FP(122);
		CASE_FIXED_FP(123);
		CASE_FIXED_FP(124);
		CASE_FIXED_FP(125);
		CASE_FIXED_FP(126);
		CASE_FIXED_FP(127);
	}
}

void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
						struct ia64_fpreg *val)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	getfpreg(reg, val, regs);	/* FIXME: handle NATs later*/
}

void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
						struct ia64_fpreg *val)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	if (reg > 1)
		setfpreg(reg, val, regs);	/* FIXME: handle NATs later*/
}

/************************************************************************
 * lsapic timer
 ***********************************************************************/
u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
{
	unsigned long guest_itc;

	guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);

	if (guest_itc >= VMX(vcpu, last_itc)) {
		VMX(vcpu, last_itc) = guest_itc;
		return guest_itc;
	} else
		return VMX(vcpu, last_itc);
}
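
/*
 * Illustrative example: the guest ITC is modelled as host ar.itc plus a
 * per-vcpu itc_offset, clamped so it never appears to move backwards.  If
 * the host ar.itc reads 1,000,000 and itc_offset is -200,000, the guest
 * sees 800,000, unless an earlier read already returned a larger last_itc,
 * in which case that earlier value is returned again.
 */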

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);

static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_vcpu *v;
	int i;
	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
	unsigned long vitv = VCPU(vcpu, itv);

	if (vcpu->vcpu_id == 0) {
		for (i = 0; i < KVM_MAX_VCPUS; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			VMX(v, itc_offset) = itc_offset;
			VMX(v, last_itc) = 0;
		}
	}
	VMX(vcpu, last_itc) = 0;
	if (VCPU(vcpu, itm) <= val) {
		VMX(vcpu, itc_check) = 0;
		vcpu_unpend_interrupt(vcpu, vitv);
	} else {
		VMX(vcpu, itc_check) = 1;
		vcpu_set_itm(vcpu, VCPU(vcpu, itm));
	}
}

static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, itm));
}

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
{
	unsigned long vitv = VCPU(vcpu, itv);

	VCPU(vcpu, itm) = val;

	if (val > vcpu_get_itc(vcpu)) {
		VMX(vcpu, itc_check) = 1;
		vcpu_unpend_interrupt(vcpu, vitv);
		VMX(vcpu, timer_pending) = 0;
	} else
		VMX(vcpu, itc_check) = 0;
}

#define ITV_VECTOR(itv)		(itv&0xff)
#define ITV_IRQ_MASK(itv)	(itv&(1<<16))

static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, itv) = val;
	if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
		vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
		vcpu->arch.timer_pending = 0;
	}
}

static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
{
	int vec;

	vec = highest_inservice_irq(vcpu);
	if (vec == NULL_VECTOR)
		return;
	VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
	VCPU(vcpu, eoi) = 0;
	vcpu->arch.irq_new_pending = 1;
}

/* See Table 5-8 in SDM vol2 for the definition */
int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
{
	union ia64_tpr vtpr;

	vtpr.val = VCPU(vcpu, tpr);

	if (h_inservice == NMI_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == NMI_VECTOR) {
		/* Non Maskable Interrupt */
		return IRQ_NO_MASKED;
	}

	if (h_inservice == ExtINT_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == ExtINT_VECTOR) {
		if (vtpr.mmi) {
			/* mask all external IRQ */
			return IRQ_MASKED_BY_VTPR;
		} else
			return IRQ_NO_MASKED;
	}

	if (is_higher_irq(h_pending, h_inservice)) {
		if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
			return IRQ_NO_MASKED;
		else
			return IRQ_MASKED_BY_VTPR;
	} else {
		return IRQ_MASKED_BY_INSVC;
	}
}
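
/*
 * Illustrative example (assuming is_higher_class() compares the 4-bit
 * priority class, vector >> 4, against the given limit): with vtpr.mic = 5
 * and vtpr.mmi = 0, a pending vector 0x61 (class 6) is deliverable if it
 * also beats the highest in-service interrupt, while vector 0x4f (class 4)
 * is reported as IRQ_MASKED_BY_VTPR.
 */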

void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long spsr;
	int ret;

	local_irq_save(spsr);
	ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);

	vcpu->arch.irq_new_pending = 1;
}

void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long spsr;
	int ret;

	local_irq_save(spsr);
	ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);
	if (ret) {
		vcpu->arch.irq_new_pending = 1;
		wmb();
	}
}

void update_vhpi(struct kvm_vcpu *vcpu, int vec)
{
	u64 vhpi;

	if (vec == NULL_VECTOR)
		vhpi = 0;
	else if (vec == NMI_VECTOR)
		vhpi = 32;
	else if (vec == ExtINT_VECTOR)
		vhpi = 16;
	else
		vhpi = vec >> 4;

	VCPU(vcpu, vhpi) = vhpi;
	if (VCPU(vcpu, vac).a_int)
		ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
				(u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
}

u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
{
	int vec, h_inservice, mask;

	vec = highest_pending_irq(vcpu);
	h_inservice = highest_inservice_irq(vcpu);
	mask = irq_masked(vcpu, vec, h_inservice);
	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
		if (VCPU(vcpu, vhpi))
			update_vhpi(vcpu, NULL_VECTOR);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	if (mask == IRQ_MASKED_BY_VTPR) {
		update_vhpi(vcpu, vec);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
	vcpu_unpend_interrupt(vcpu, vec);
	return (u64)vec;
}

/**************************************************************************
  Privileged operation emulation routines
 **************************************************************************/
u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_pta vpta;
	union ia64_rr vrr;
	u64 pval;
	u64 vhpt_offset;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
	if (vpta.vf) {
		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
				vpta.val, 0, 0, 0, 0);
	} else {
		pval = (vadr & VRN_MASK) | vhpt_offset |
			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
	}
	return pval;
}
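
/*
 * Illustrative example: when pta.vf is clear, the short-format (linear)
 * VHPT address computed above is the region bits of vadr, plus the page
 * number times 8 wrapped to the table size, plus the table base from pta.
 * With rr.ps = 16 (64KB pages) and pta.size = 20 (a 1MB table), the offset
 * contribution for vadr is ((vadr >> 16) << 3) & 0xfffff.
 */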

u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_rr vrr;
	union ia64_pta vpta;
	u64 pval;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	if (vpta.vf) {
		pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
						0, 0, 0, 0, 0);
	} else
		pval = 1;

	return pval;
}

u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
{
	struct thash_data *data;
	union ia64_pta vpta;
	u64 key;

	vpta.val = vcpu_get_pta(vcpu);
	if (vpta.vf == 0) {
		key = 1;
		return key;
	}
	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (!data || !data->p)
		key = 1;
	else
		key = data->key;

	return key;
}

void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long thash, vadr;

	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	thash = vcpu_thash(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
}

void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tag, vadr;

	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	tag = vcpu_ttag(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
}

int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
{
	struct thash_data *data;
	union ia64_isr visr, pt_isr;
	struct kvm_pt_regs *regs;
	struct ia64_psr vpsr;

	regs = vcpu_regs(vcpu);
	pt_isr.val = VMX(vcpu, cr_isr);
	visr.val = 0;
	visr.ei = pt_isr.ei;
	visr.ir = pt_isr.ir;
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
	visr.na = 1;

	data = vhpt_lookup(vadr);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			*padr = (data->gpaddr >> data->ps << data->ps) |
				(vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}

	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
				| (vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}
	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			alt_dtlb(vcpu, vadr);
			return IA64_FAULT;
		} else {
			nested_dtlb(vcpu);
			return IA64_FAULT;
		}
	} else {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			dvhpt_fault(vcpu, vadr);
			return IA64_FAULT;
		} else {
			nested_dtlb(vcpu);
			return IA64_FAULT;
		}
	}

	return IA64_NO_FAULT;
}
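
/*
 * Illustrative example (assuming the ppn field counts 4KB frames, as the
 * ">> (data->ps - 12)" above implies): the translated address is the page
 * base of the matching entry plus the page offset of vadr.  For a 16KB
 * page (ps = 14) whose frame starts at 0x4000000, a vadr ending in 0xfee
 * yields *padr = 0x4000fee.
 */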

int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1, r3;

	r3 = vcpu_get_gr(vcpu, inst.M46.r3);

	if (vcpu_tpa(vcpu, r3, &r1))
		return IA64_FAULT;

	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
	return(IA64_NO_FAULT);
}

void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1, r3;

	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
	r1 = vcpu_tak(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
}

/************************************
 * Insert/Purge translation register/cache
 ************************************/
void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
}

void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
}

void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 ps, va, rid;
	struct thash_data *p_itr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;
	rid = vcpu_get_rr(vcpu, ifa);
	rid = rid & RR_RID_MASK;
	p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
	vcpu_set_tr(p_itr, pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
}
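
/*
 * Illustrative example (assuming the usual helper definitions elsewhere in
 * this tree): itir_ps() extracts the page-size field from ITIR bits 7:2
 * and PAGEALIGN() masks the address down to that page boundary, so an itir
 * of 0x38 gives ps = 14 and an ifa of 0x80003210 aligns to 0x80000000.
 */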

void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 gpfn;
	u64 ps, va, rid;
	struct thash_data *p_dtr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;

	if (ps != _PAGE_SIZE_16M)
		thash_purge_entries(vcpu, va, ps);
	gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	if (__gpfn_is_io(gpfn))
		pte |= VTLB_PTE_IO;
	rid = vcpu_get_rr(vcpu, va);
	rid = rid & RR_RID_MASK;
	p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
	vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
							pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
}

void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	int index;
	u64 va;

	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
		vcpu->arch.dtrs[index].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	int index;
	u64 va;

	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
		vcpu->arch.itrs[index].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	va = PAGEALIGN(va, ps);
	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
{
	thash_purge_all(vcpu);
}

void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	long psr;

	local_irq_save(psr);
	p->exit_reason = EXIT_REASON_PTC_G;

	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
	p->u.ptc_g_data.vaddr = va;
	p->u.ptc_g_data.ps = ps;
	vmm_transition(vcpu);
	/* Do Local Purge Here*/
	vcpu_ptc_l(vcpu, va, ps);
	local_irq_restore(psr);
}

void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	vcpu_ptc_ga(vcpu, va, ps);
}

void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	vcpu_ptc_e(vcpu, ifa);
}

void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
}

void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte, slot;

	slot = vcpu_get_gr(vcpu, inst.M45.r3);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	vcpu_itr_d(vcpu, slot, pte, itir, ifa);
}

void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte, slot;

	slot = vcpu_get_gr(vcpu, inst.M45.r3);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	vcpu_itr_i(vcpu, slot, pte, itir, ifa);
}

void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte;

	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_itc_d(vcpu, pte, itir, ifa);
}

void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte;

	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_itc_i(vcpu, pte, itir, ifa);
}

/*************************************
 * Moves to semi-privileged registers
 *************************************/
void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long imm;

	if (inst.M30.s)
		imm = -inst.M30.imm;
	else
		imm = inst.M30.imm;

	vcpu_set_itc(vcpu, imm);
}

void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M29.r2);
	vcpu_set_itc(vcpu, r2);
}

void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1;

	r1 = vcpu_get_itc(vcpu);
	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
}

/**************************************************************************
  struct kvm_vcpu protection key register access routines
 **************************************************************************/
unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	return ((unsigned long)ia64_get_pkr(reg));
}

void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
{
	ia64_set_pkr(reg, val);
}

unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
{
	union ia64_rr rr, rr1;

	rr.val = vcpu_get_rr(vcpu, ifa);
	rr1.val = 0;
	rr1.ps = rr.ps;
	rr1.rid = rr.rid;
	return (rr1.val);
}

/********************************
 * Moves to privileged registers
 ********************************/
unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
					unsigned long val)
{
	union ia64_rr oldrr, newrr;
	unsigned long rrval;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	oldrr.val = vcpu_get_rr(vcpu, reg);
	newrr.val = val;
	vcpu->arch.vrr[reg >> VRN_SHIFT] = val;

	switch ((unsigned long)(reg >> VRN_SHIFT)) {
	case VRN6:
		vcpu->arch.vmm_rr = vrrtomrr(val);
		local_irq_save(psr);
		p->exit_reason = EXIT_REASON_SWITCH_RR6;
		vmm_transition(vcpu);
		local_irq_restore(psr);
		break;
	case VRN4:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr4 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	case VRN0:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr0 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	default:
		ia64_set_rr(reg, vrrtomrr(val));
		break;
	}

	return (IA64_NO_FAULT);
}

void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_rr(vcpu, r3, r2);
}

void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmc(vcpu, r3, r2);
}

void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmd(vcpu, r3, r2);
}

void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	u64 r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pkr(vcpu, r3, r2);
}

void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_rr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pkr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_dbr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_ibr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pmc(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
{
	/* FIXME: This could get called as a result of a rsvd-reg fault */
	if (reg > (ia64_get_cpuid(3) & 0xff))
		return 0;
	else
		return ia64_get_cpuid(reg);
}

void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_cpuid(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
{
	VCPU(vcpu, tpr) = val;
	vcpu->arch.irq_check = 1;
}

unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M32.r2);
	VCPU(vcpu, vcr[inst.M32.cr3]) = r2;

	switch (inst.M32.cr3) {
	case 0:
		vcpu_set_dcr(vcpu, r2);
		break;
	case 1:
		vcpu_set_itm(vcpu, r2);
		break;
	case 66:
		vcpu_set_tpr(vcpu, r2);
		break;
	case 67:
		vcpu_set_eoi(vcpu, r2);
		break;
	default:
		break;
	}

	return 0;
}

unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tgt = inst.M33.r1;
	unsigned long val;

	switch (inst.M33.cr3) {
	case 65:
		val = vcpu_get_ivr(vcpu);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	case 67:
		vcpu_set_gr(vcpu, tgt, 0L, 0);
		break;
	default:
		val = VCPU(vcpu, vcr[inst.M33.cr3]);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	}

	return 0;
}

void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long mask;
	struct kvm_pt_regs *regs;
	struct ia64_psr old_psr, new_psr;

	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	regs = vcpu_regs(vcpu);
	/* We only support guests with:
	 *	vpsr.pk = 0
	 *	vpsr.is = 0
	 * Otherwise panic
	 */
	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
		panic_vm(vcpu, "Only support guests with vpsr.pk =0 & vpsr.is=0\n");

	/*
	 * For the IA64_PSR bits id/da/dd/ss/ed/ia:
	 * since these bits become 0 after each instruction executes
	 * successfully, they are not kept in vpsr but left to the
	 * machine PSR (mIA64_PSR).
	 */
	VCPU(vcpu, vpsr) = val
		& (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
			IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

	if (!old_psr.i && (val & IA64_PSR_I)) {
		/* vpsr.i 0->1 */
		vcpu->arch.irq_check = 1;
	}
	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	/*
	 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
	 * except for the following bits:
	 * ic/i/dt/si/rt/mc/it/bn/vm
	 */
	mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
		IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
		IA64_PSR_VM;

	regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));

	check_mm_mode_switch(vcpu, old_psr, new_psr);

	return;
}

unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
{
	struct ia64_psr vpsr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	if (!vpsr.ic)
		VCPU(vcpu, ifs) = regs->cr_ifs;
	regs->cr_ifs = IA64_IFS_V;
	return (IA64_NO_FAULT);
}

/**************************************************************************
  VCPU banked general register access routines
 **************************************************************************/
#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	do {								\
		__asm__ __volatile__ (					\
				";;extr.u %0 = %3,%6,16;;\n"		\
				"dep %1 = %0, %1, 0, 16;;\n"		\
				"st8 [%4] = %1\n"			\
				"extr.u %0 = %2, 16, 16;;\n"		\
				"dep %3 = %0, %3, %6, 16;;\n"		\
				"st8 [%5] = %3\n"			\
				::"r"(i), "r"(*b1unat), "r"(*b0unat),	\
				"r"(*runat), "r"(b1unat), "r"(runat),	\
				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
	} while (0)
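
/*
 * Illustrative note: the asm above shuffles the 16 NaT bits belonging to
 * r16-r31 between regs->eml_unat (at bit position VMM_PT_REGS_R16_SLOT)
 * and the saved bank NaT words, mirroring for the NaT bits what the C loop
 * in vcpu_bsw0() below does for the register values themselves.
 */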
  1469. void vcpu_bsw0(struct kvm_vcpu *vcpu)
  1470. {
  1471. unsigned long i;
  1472. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  1473. unsigned long *r = &regs->r16;
  1474. unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
  1475. unsigned long *b1 = &VCPU(vcpu, vgr[0]);
  1476. unsigned long *runat = &regs->eml_unat;
  1477. unsigned long *b0unat = &VCPU(vcpu, vbnat);
  1478. unsigned long *b1unat = &VCPU(vcpu, vnat);
  1479. if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
  1480. for (i = 0; i < 16; i++) {
  1481. *b1++ = *r;
  1482. *r++ = *b0++;
  1483. }
  1484. vcpu_bsw0_unat(i, b0unat, b1unat, runat,
  1485. VMM_PT_REGS_R16_SLOT);
  1486. VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
  1487. }
  1488. }
  1489. #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
  1490. do { \
  1491. __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
  1492. "dep %1 = %0, %1, 16, 16;;\n" \
  1493. "st8 [%4] = %1\n" \
  1494. "extr.u %0 = %2, 0, 16;;\n" \
  1495. "dep %3 = %0, %3, %6, 16;;\n" \
  1496. "st8 [%5] = %3\n" \
  1497. ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
  1498. "r"(*runat), "r"(b0unat), "r"(runat), \
  1499. "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
  1500. } while (0)
void vcpu_bsw1(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
		for (i = 0; i < 16; i++) {
			*b0++ = *r;
			*r++ = *b1++;
		}
		vcpu_bsw1_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) |= IA64_PSR_BN;
	}
}
void vcpu_rfi(struct kvm_vcpu *vcpu)
{
	unsigned long ifs, psr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	psr = VCPU(vcpu, ipsr);
	if (psr & IA64_PSR_BN)
		vcpu_bsw1(vcpu);
	else
		vcpu_bsw0(vcpu);
	vcpu_set_psr(vcpu, psr);
	ifs = VCPU(vcpu, ifs);
	if (ifs >> 63)
		regs->cr_ifs = ifs;
	regs->cr_iip = VCPU(vcpu, iip);
}
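
/*
 * rfi emulation in short: select the register bank named by the saved
 * ipsr.bn, install the saved PSR, copy the saved IFS into cr_ifs when
 * its valid bit (bit 63) is set, and resume at the saved iip.
 */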
/*
 * VPSR cannot keep track of the guest PSR bits listed in 'mask' below;
 * those live in the machine cr_ipsr instead. This function reassembles
 * the full guest PSR from both sources.
 */
unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
{
	unsigned long mask;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
		IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
	return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}
void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vpsr;
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
				| inst.M44.imm;

	vpsr = vcpu_get_psr(vcpu);
	vpsr &= (~imm24);
	vcpu_set_psr(vcpu, vpsr);
}

void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vpsr;
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
				| inst.M44.imm;

	vpsr = vcpu_get_psr(vcpu);
	vpsr |= imm24;
	vcpu_set_psr(vcpu, vpsr);
}
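
/*
 * The M44 encoding splits the 24-bit ssm/rsm immediate into imm{20:0},
 * i2{22:21} and i{23}, hence the (i << 23) | (i2 << 21) | imm
 * reassembly above.  For example, "rsm 0x4000" carries an immediate
 * with only bit 14 (IA64_PSR_I) set and so masks interrupt delivery.
 */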
/* Generate Mask
 * Parameter:
 *   bit -- starting bit
 *   len -- how many bits
 */
#define MASK(bit,len)					\
({							\
	__u64 ret;					\
							\
	__asm __volatile("dep %0=-1, r0, %1, %2"	\
			: "=r" (ret):			\
			"M" (bit),			\
			"M" (len));			\
	ret;						\
})
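
/*
 * The dep instruction deposits 'len' one-bits starting at position 'bit'
 * into a zero source, so e.g. MASK(0, 32) is 0x00000000ffffffff,
 * MASK(32, 32) is 0xffffffff00000000 and MASK(35, 2) covers bits 35-36.
 */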
void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
{
	val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
	vcpu_set_psr(vcpu, val);
}

void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_gr(vcpu, inst.M35.r2);
	vcpu_set_psr_l(vcpu, val);
}

void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_psr(vcpu);
	val = (val & MASK(0, 32)) | (val & MASK(35, 2));
	vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
}
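
/*
 * mov-to-psr only replaces PSR{31:0} (the upper half is kept from the
 * current guest PSR), and mov-from-psr exposes only PSR{31:0} plus
 * bits 36:35, which presumably mirrors the architected behaviour of
 * the user-level mov =psr forms.
 */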
void vcpu_increment_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	if (ipsr->ri == 2) {
		ipsr->ri = 0;
		regs->cr_iip += 16;
	} else
		ipsr->ri++;
}

void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	if (ipsr->ri == 0) {
		ipsr->ri = 2;
		regs->cr_iip -= 16;
	} else
		ipsr->ri--;
}
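
/*
 * IA-64 instructions are packed three per 16-byte bundle; cr_iip points
 * at the bundle and ipsr.ri (0-2) selects the slot inside it, hence the
 * wrap-around at slot 2 and the +/-16 adjustment of cr_iip above.
 */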
/** Emulate a privileged operation.
 *
 * @param vcpu virtual cpu
 * @param regs guest registers at the virtualization fault; the fault
 *             cause and the faulting opcode are read from
 *             VMX(vcpu, cause) and VMX(vcpu, opcode)
 */
void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
{
	unsigned long status, cause, opcode;
	INST64 inst;

	status = IA64_NO_FAULT;
	cause = VMX(vcpu, cause);
	opcode = VMX(vcpu, opcode);
	inst.inst = opcode;
	/*
	 * Switch to actual virtual rid in rr0 and rr4,
	 * which is required by some tlb related instructions.
	 */
	prepare_if_physical_mode(vcpu);

	switch (cause) {
	case EVENT_RSM:
		kvm_rsm(vcpu, inst);
		break;
	case EVENT_SSM:
		kvm_ssm(vcpu, inst);
		break;
	case EVENT_MOV_TO_PSR:
		kvm_mov_to_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PSR:
		kvm_mov_from_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CR:
		kvm_mov_from_cr(vcpu, inst);
		break;
	case EVENT_MOV_TO_CR:
		kvm_mov_to_cr(vcpu, inst);
		break;
	case EVENT_BSW_0:
		vcpu_bsw0(vcpu);
		break;
	case EVENT_BSW_1:
		vcpu_bsw1(vcpu);
		break;
	case EVENT_COVER:
		vcpu_cover(vcpu);
		break;
	case EVENT_RFI:
		vcpu_rfi(vcpu);
		break;
	case EVENT_ITR_D:
		kvm_itr_d(vcpu, inst);
		break;
	case EVENT_ITR_I:
		kvm_itr_i(vcpu, inst);
		break;
	case EVENT_PTR_D:
		kvm_ptr_d(vcpu, inst);
		break;
	case EVENT_PTR_I:
		kvm_ptr_i(vcpu, inst);
		break;
	case EVENT_ITC_D:
		kvm_itc_d(vcpu, inst);
		break;
	case EVENT_ITC_I:
		kvm_itc_i(vcpu, inst);
		break;
	case EVENT_PTC_L:
		kvm_ptc_l(vcpu, inst);
		break;
	case EVENT_PTC_G:
		kvm_ptc_g(vcpu, inst);
		break;
	case EVENT_PTC_GA:
		kvm_ptc_ga(vcpu, inst);
		break;
	case EVENT_PTC_E:
		kvm_ptc_e(vcpu, inst);
		break;
	case EVENT_MOV_TO_RR:
		kvm_mov_to_rr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_RR:
		kvm_mov_from_rr(vcpu, inst);
		break;
	case EVENT_THASH:
		kvm_thash(vcpu, inst);
		break;
	case EVENT_TTAG:
		kvm_ttag(vcpu, inst);
		break;
	case EVENT_TPA:
		status = kvm_tpa(vcpu, inst);
		break;
	case EVENT_TAK:
		kvm_tak(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR_IMM:
		kvm_mov_to_ar_imm(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR:
		kvm_mov_to_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_FROM_AR:
		kvm_mov_from_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_TO_DBR:
		kvm_mov_to_dbr(vcpu, inst);
		break;
	case EVENT_MOV_TO_IBR:
		kvm_mov_to_ibr(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMC:
		kvm_mov_to_pmc(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMD:
		kvm_mov_to_pmd(vcpu, inst);
		break;
	case EVENT_MOV_TO_PKR:
		kvm_mov_to_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_DBR:
		kvm_mov_from_dbr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_IBR:
		kvm_mov_from_ibr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PMC:
		kvm_mov_from_pmc(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PKR:
		kvm_mov_from_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CPUID:
		kvm_mov_from_cpuid(vcpu, inst);
		break;
	case EVENT_VMSW:
		status = IA64_FAULT;
		break;
	default:
		break;
	}

	/*
	 * Assume every handler above returned IA64_NO_FAULT?  On success,
	 * step past the emulated instruction; EVENT_RFI is excluded because
	 * vcpu_rfi() has already loaded cr_iip with the guest's resume point.
	 */
	if (status == IA64_NO_FAULT && cause != EVENT_RFI)
		vcpu_increment_iip(vcpu);

	recover_if_physical_mode(vcpu);
}
void init_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	vcpu->arch.mode_flags = GUEST_IN_PHY;
	VMX(vcpu, vrr[0]) = 0x38;
	VMX(vcpu, vrr[1]) = 0x38;
	VMX(vcpu, vrr[2]) = 0x38;
	VMX(vcpu, vrr[3]) = 0x38;
	VMX(vcpu, vrr[4]) = 0x38;
	VMX(vcpu, vrr[5]) = 0x38;
	VMX(vcpu, vrr[6]) = 0x38;
	VMX(vcpu, vrr[7]) = 0x38;
	VCPU(vcpu, vpsr) = IA64_PSR_BN;
	VCPU(vcpu, dcr) = 0;
	/* pta.size must not be 0. The minimum is 15 (32k) */
	VCPU(vcpu, pta) = 15 << 2;
	VCPU(vcpu, itv) = 0x10000;
	VCPU(vcpu, itm) = 0;
	VMX(vcpu, last_itc) = 0;
	VCPU(vcpu, lid) = VCPU_LID(vcpu);
	VCPU(vcpu, ivr) = 0;
	VCPU(vcpu, tpr) = 0x10000;
	VCPU(vcpu, eoi) = 0;
	VCPU(vcpu, irr[0]) = 0;
	VCPU(vcpu, irr[1]) = 0;
	VCPU(vcpu, irr[2]) = 0;
	VCPU(vcpu, irr[3]) = 0;
	VCPU(vcpu, pmv) = 0x10000;
	VCPU(vcpu, cmcv) = 0x10000;
	VCPU(vcpu, lrr0) = 0x10000;	/* default reset value? */
	VCPU(vcpu, lrr1) = 0x10000;	/* default reset value? */
	update_vhpi(vcpu, NULL_VECTOR);
	VLSAPIC_XTP(vcpu) = 0x80;	/* disabled */
	for (i = 0; i < 4; i++)
		VLSAPIC_INSVC(vcpu, i) = 0;
}
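
/*
 * A note on the reset values above: the pta size field sits at bits 7:2,
 * so "15 << 2" encodes a 2^15 = 32KB VHPT, the architectural minimum.
 * The 0x10000 written to itv, pmv, cmcv, lrr0 and lrr1 sets bit 16, the
 * mask bit of those vector registers, so these local interrupt sources
 * start out masked; tpr = 0x10000 likewise sets its mmi bit.
 */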
void kvm_init_all_rr(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	local_irq_save(psr);

	/* WARNING: virtual mode and physical mode must not
	 * coexist in the same region
	 */
	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));

	if (is_physical_mode(vcpu)) {
		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
			panic_vm(vcpu, "Machine Status conflicts!\n");
		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
		ia64_dv_serialize_data();
	} else {
		ia64_set_rr((VRN0 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr4);
		ia64_dv_serialize_data();
	}
	ia64_set_rr((VRN1 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN7 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN7])));
	ia64_dv_serialize_data();
	ia64_srlz_d();
	ia64_set_psr(psr);
}
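
/*
 * In the function above, rr0 and rr4 receive either the metaphysical
 * values (guest in physical addressing mode) or the guest-derived ones,
 * while regions 1, 2, 3, 5 and 7 are always loaded from the guest's vrr
 * values; rr6 is not touched here.
 */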
int vmm_entry(void)
{
	struct kvm_vcpu *v;

	v = current_vcpu;
	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
						0, 0, 0, 0, 0, 0);
	kvm_init_vtlb(v);
	kvm_init_vhpt(v);
	init_vcpu(v);
	kvm_init_all_rr(v);
	vmm_reset_entry();
	return 0;
}
static void kvm_show_registers(struct kvm_pt_regs *regs)
{
	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;

	struct kvm_vcpu *vcpu = current_vcpu;
	if (vcpu != NULL)
		printk("vcpu 0x%p vcpu %d\n",
				vcpu, vcpu->vcpu_id);

	printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n",
			regs->cr_ipsr, regs->cr_ifs, ip);

	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
			regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
	printk("rnat: %016lx bspstore: %016lx pr : %016lx\n",
			regs->ar_rnat, regs->ar_bspstore, regs->pr);
	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
			regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
	printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0,
			regs->b6, regs->b7);
	printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
			regs->f6.u.bits[1], regs->f6.u.bits[0],
			regs->f7.u.bits[1], regs->f7.u.bits[0]);
	printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
			regs->f8.u.bits[1], regs->f8.u.bits[0],
			regs->f9.u.bits[1], regs->f9.u.bits[0]);
	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
			regs->f10.u.bits[1], regs->f10.u.bits[0],
			regs->f11.u.bits[1], regs->f11.u.bits[0]);
	printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1,
			regs->r2, regs->r3);
	printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8,
			regs->r9, regs->r10);
	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
			regs->r12, regs->r13);
	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
			regs->r15, regs->r16);
	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
			regs->r18, regs->r19);
	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
			regs->r21, regs->r22);
	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
			regs->r24, regs->r25);
	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
			regs->r27, regs->r28);
	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
			regs->r30, regs->r31);
}
void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
{
	va_list args;
	char buf[256];

	struct kvm_pt_regs *regs = vcpu_regs(v);
	struct exit_ctl_data *p = &v->arch.exit_data;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	/* Print through a fixed format so a '%' in the formatted message
	 * cannot be misinterpreted as a conversion specifier. */
	printk("%s", buf);
	kvm_show_registers(regs);
	p->exit_reason = EXIT_REASON_VM_PANIC;
	vmm_transition(v);
	/* Never returns. */
	while (1);
}