vcpu.c

  1. /*
  2. * kvm_vcpu.c: handling all virtual cpu related things.
  3. * Copyright (c) 2005, Intel Corporation.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  16. * Place - Suite 330, Boston, MA 02111-1307 USA.
  17. *
  18. * Shaofan Li (Susue Li) <susie.li@intel.com>
  19. * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
  20. * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
  21. * Xiantao Zhang <xiantao.zhang@intel.com>
  22. */
  23. #include <linux/kvm_host.h>
  24. #include <linux/types.h>
  25. #include <asm/processor.h>
  26. #include <asm/ia64regs.h>
  27. #include <asm/gcc_intrin.h>
  28. #include <asm/kregs.h>
  29. #include <asm/pgtable.h>
  30. #include <asm/tlb.h>
  31. #include "asm-offsets.h"
  32. #include "vcpu.h"
  33. /*
  34. * Special notes:
  35. * - Index by it/dt/rt sequence
  36. * - Only existing mode transitions are allowed in this table
  37. * - The RSE is placed in lazy mode when emulating guest partial mode
  38. * - If gva falls in rr0 or rr4, the only allowed case is identity
  39. * mapping (gva=gpa); otherwise panic! (How?)
  40. */
  41. int mm_switch_table[8][8] = {
  42. /* 2004/09/12(Kevin): Allow switch to self */
  43. /*
  44. * (it,dt,rt): (0,0,0) -> (1,1,1)
  45. * This kind of transition usually occurs in the very early
  46. * stage of Linux boot up procedure. Another case is in efi
  47. * and pal calls. (see "arch/ia64/kernel/head.S")
  48. *
  49. * (it,dt,rt): (0,0,0) -> (0,1,1)
  50. * This kind of transition is found when OSYa exits efi boot
  51. * service. Because gva = gpa in this case (same region),
  52. * data access can be satisfied even though the itlb entry for
  53. * physical emulation is hit.
  54. */
  55. {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
  56. {0, 0, 0, 0, 0, 0, 0, 0},
  57. {0, 0, 0, 0, 0, 0, 0, 0},
  58. /*
  59. * (it,dt,rt): (0,1,1) -> (1,1,1)
  60. * This kind of transition is found in OSYa.
  61. *
  62. * (it,dt,rt): (0,1,1) -> (0,0,0)
  63. * This kind of transition is found in OSYa.
  64. */
  65. {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
  66. /* (1,0,0)->(1,1,1) */
  67. {0, 0, 0, 0, 0, 0, 0, SW_P2V},
  68. /*
  69. * (it,dt,rt): (1,0,1) -> (1,1,1)
  70. * This kind of transition usually occurs when Linux returns
  71. * from the low level TLB miss handlers.
  72. * (see "arch/ia64/kernel/ivt.S")
  73. */
  74. {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
  75. {0, 0, 0, 0, 0, 0, 0, 0},
  76. /*
  77. * (it,dt,rt): (1,1,1) -> (1,0,1)
  78. * This kind of transition usually occurs in Linux low level
  79. * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
  80. *
  81. * (it,dt,rt): (1,1,1) -> (0,0,0)
  82. * This kind of transition usually occurs in pal and efi calls,
  83. * which requires running in physical mode.
  84. * (see "arch/ia64/kernel/head.S")
  85. * (1,1,1)->(1,0,0)
  86. */
  87. {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
  88. };
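/*
 * Illustrative lookup (assuming MODE_IND() packs (it,dt,rt) into a
 * 3-bit index with it as the most significant bit, as the table layout
 * above suggests): a guest leaving fully physical mode,
 * (it,dt,rt) = (0,0,0) -> (1,1,1), reads mm_switch_table[0][7], which
 * is SW_P2V, so switch_mm_mode() restores the virtual RIDs via
 * switch_to_virtual_rid().
 */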
  89. void physical_mode_init(struct kvm_vcpu *vcpu)
  90. {
  91. vcpu->arch.mode_flags = GUEST_IN_PHY;
  92. }
  93. void switch_to_physical_rid(struct kvm_vcpu *vcpu)
  94. {
  95. unsigned long psr;
  96. /* Save original virtual mode rr[0] and rr[4] */
  97. psr = ia64_clear_ic();
  98. ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
  99. ia64_srlz_d();
  100. ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
  101. ia64_srlz_d();
  102. ia64_set_psr(psr);
  103. return;
  104. }
  105. void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
  106. {
  107. unsigned long psr;
  108. psr = ia64_clear_ic();
  109. ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
  110. ia64_srlz_d();
  111. ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
  112. ia64_srlz_d();
  113. ia64_set_psr(psr);
  114. return;
  115. }
  116. static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
  117. {
  118. return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
  119. }
  120. void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
  121. struct ia64_psr new_psr)
  122. {
  123. int act;
  124. act = mm_switch_action(old_psr, new_psr);
  125. switch (act) {
  126. case SW_V2P:
  127. /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
  128. old_psr.val, new_psr.val);*/
  129. switch_to_physical_rid(vcpu);
  130. /*
  131. * Set the RSE to enforced lazy mode, to prevent active RSE
  132. * save/restore while the guest is in physical mode.
  133. */
  134. vcpu->arch.mode_flags |= GUEST_IN_PHY;
  135. break;
  136. case SW_P2V:
  137. switch_to_virtual_rid(vcpu);
  138. /*
  139. * Recover the old mode, which was saved when entering
  140. * guest physical mode.
  141. */
  142. vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
  143. break;
  144. case SW_SELF:
  145. break;
  146. case SW_NOP:
  147. break;
  148. default:
  149. /* Sanity check */
  150. break;
  151. }
  152. return;
  153. }
  154. /*
  155. * In physical mode, inserting tc/tr entries for regions 0 and 4 uses
  156. * RID[0] and RID[4], which are reserved for physical mode emulation.
  157. * However, what those inserted tc/tr entries want is the RID for
  158. * virtual mode, so the original virtual RID needs to be restored
  159. * before the insert.
  160. *
  161. * Operations which require such a switch include:
  162. * - insertions (itc.*, itr.*)
  163. * - purges (ptc.* and ptr.*)
  164. * - tpa
  165. * - tak
  166. * - thash?, ttag?
  167. * All of the above need the actual virtual RID for the destination entry.
  168. */
  169. void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
  170. struct ia64_psr new_psr)
  171. {
  172. if ((old_psr.dt != new_psr.dt)
  173. || (old_psr.it != new_psr.it)
  174. || (old_psr.rt != new_psr.rt))
  175. switch_mm_mode(vcpu, old_psr, new_psr);
  176. return;
  177. }
  178. /*
  179. * In physical mode, inserting tc/tr entries for regions 0 and 4 uses
  180. * RID[0] and RID[4], which are reserved for physical mode emulation.
  181. * However, what those inserted tc/tr entries want is the RID for
  182. * virtual mode, so the original virtual RID needs to be restored
  183. * before the insert.
  184. *
  185. * Operations which require such a switch include:
  186. * - insertions (itc.*, itr.*)
  187. * - purges (ptc.* and ptr.*)
  188. * - tpa
  189. * - tak
  190. * - thash?, ttag?
  191. * All of the above need the actual virtual RID for the destination entry.
  192. */
  193. void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
  194. {
  195. if (is_physical_mode(vcpu)) {
  196. vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
  197. switch_to_virtual_rid(vcpu);
  198. }
  199. return;
  200. }
  201. /* Recover always follows prepare */
  202. void recover_if_physical_mode(struct kvm_vcpu *vcpu)
  203. {
  204. if (is_physical_mode(vcpu))
  205. switch_to_physical_rid(vcpu);
  206. vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
  207. return;
  208. }
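/*
 * Usage sketch (illustrative only, not a call site in this file): an
 * emulation path that inserts a translation while the guest is in
 * physical mode would bracket the insert as
 *
 *	prepare_if_physical_mode(vcpu);
 *	... itc/itr emulation using the guest's virtual RIDs ...
 *	recover_if_physical_mode(vcpu);
 *
 * so that rr0/rr4 temporarily carry the virtual RIDs that the
 * inserted entry actually needs.
 */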
  209. #define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)
  210. static u16 gr_info[32] = {
  211. 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */
  212. RPT(r1), RPT(r2), RPT(r3),
  213. RPT(r4), RPT(r5), RPT(r6), RPT(r7),
  214. RPT(r8), RPT(r9), RPT(r10), RPT(r11),
  215. RPT(r12), RPT(r13), RPT(r14), RPT(r15),
  216. RPT(r16), RPT(r17), RPT(r18), RPT(r19),
  217. RPT(r20), RPT(r21), RPT(r22), RPT(r23),
  218. RPT(r24), RPT(r25), RPT(r26), RPT(r27),
  219. RPT(r28), RPT(r29), RPT(r30), RPT(r31)
  220. };
  221. #define IA64_FIRST_STACKED_GR 32
  222. #define IA64_FIRST_ROTATING_FR 32
  223. static inline unsigned long
  224. rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
  225. {
  226. reg += rrb;
  227. if (reg >= sor)
  228. reg -= sor;
  229. return reg;
  230. }
  231. /*
  232. * Return the (rotated) index for floating point register
  233. * REGNUM (REGNUM must be in the range 32-127;
  234. * the result is in the range 0-95).
  235. */
  236. static inline unsigned long fph_index(struct kvm_pt_regs *regs,
  237. long regnum)
  238. {
  239. unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
  240. return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
  241. }
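/*
 * Example: with cr.ifs.rrb.fr = 3, a reference to f34 yields
 * rotate_reg(96, 3, 34 - 32) = 5, i.e. the access is steered to
 * physical f37 inside the 96-register rotating file f32-f127.
 */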
  242. /*
  243. * The inverse of the above: given bspstore and the number of
  244. * registers, calculate ar.bsp.
  245. */
  246. static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
  247. long num_regs)
  248. {
  249. long delta = ia64_rse_slot_num(addr) + num_regs;
  250. int i = 0;
  251. if (num_regs < 0)
  252. delta -= 0x3e;
  253. if (delta < 0) {
  254. while (delta <= -0x3f) {
  255. i--;
  256. delta += 0x3f;
  257. }
  258. } else {
  259. while (delta >= 0x3f) {
  260. i++;
  261. delta -= 0x3f;
  262. }
  263. }
  264. return addr + num_regs + i;
  265. }
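/*
 * Example: if addr sits in slot 60 of its 64-slot group, skipping
 * forward 5 registers crosses one RNAT collection slot (slot 63), so
 * the result is addr + 6 words rather than addr + 5.
 */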
  266. static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
  267. unsigned long *val, int *nat)
  268. {
  269. unsigned long *bsp, *addr, *rnat_addr, *bspstore;
  270. unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
  271. unsigned long nat_mask;
  272. unsigned long old_rsc, new_rsc;
  273. long sof = (regs->cr_ifs) & 0x7f;
  274. long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
  275. long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
  276. long ridx = r1 - 32;
  277. if (ridx < sor)
  278. ridx = rotate_reg(sor, rrb_gr, ridx);
  279. old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
  280. new_rsc = old_rsc&(~(0x3));
  281. ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
  282. bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
  283. bsp = kbs + (regs->loadrs >> 19);
  284. addr = kvm_rse_skip_regs(bsp, -sof + ridx);
  285. nat_mask = 1UL << ia64_rse_slot_num(addr);
  286. rnat_addr = ia64_rse_rnat_addr(addr);
  287. if (addr >= bspstore) {
  288. ia64_flushrs();
  289. ia64_mf();
  290. bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
  291. }
  292. *val = *addr;
  293. if (nat) {
  294. if (bspstore < rnat_addr)
  295. *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
  296. & nat_mask);
  297. else
  298. *nat = (int)!!((*rnat_addr) & nat_mask);
  299. ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
  300. }
  301. }
  302. void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
  303. unsigned long val, unsigned long nat)
  304. {
  305. unsigned long *bsp, *bspstore, *addr, *rnat_addr;
  306. unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
  307. unsigned long nat_mask;
  308. unsigned long old_rsc, new_rsc, psr;
  309. unsigned long rnat;
  310. long sof = (regs->cr_ifs) & 0x7f;
  311. long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
  312. long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
  313. long ridx = r1 - 32;
  314. if (ridx < sor)
  315. ridx = rotate_reg(sor, rrb_gr, ridx);
  316. old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
  317. /* put RSC to lazy mode, and set loadrs 0 */
  318. new_rsc = old_rsc & (~0x3fff0003);
  319. ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
  320. bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
  321. addr = kvm_rse_skip_regs(bsp, -sof + ridx);
  322. nat_mask = 1UL << ia64_rse_slot_num(addr);
  323. rnat_addr = ia64_rse_rnat_addr(addr);
  324. local_irq_save(psr);
  325. bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
  326. if (addr >= bspstore) {
  327. ia64_flushrs();
  328. ia64_mf();
  329. *addr = val;
  330. bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
  331. rnat = ia64_getreg(_IA64_REG_AR_RNAT);
  332. if (bspstore < rnat_addr)
  333. rnat = rnat & (~nat_mask);
  334. else
  335. *rnat_addr = (*rnat_addr)&(~nat_mask);
  336. ia64_mf();
  337. ia64_loadrs();
  338. ia64_setreg(_IA64_REG_AR_RNAT, rnat);
  339. } else {
  340. rnat = ia64_getreg(_IA64_REG_AR_RNAT);
  341. *addr = val;
  342. if (bspstore < rnat_addr)
  343. rnat = rnat&(~nat_mask);
  344. else
  345. *rnat_addr = (*rnat_addr) & (~nat_mask);
  346. ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
  347. ia64_setreg(_IA64_REG_AR_RNAT, rnat);
  348. }
  349. local_irq_restore(psr);
  350. ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
  351. }
  352. void getreg(unsigned long regnum, unsigned long *val,
  353. int *nat, struct kvm_pt_regs *regs)
  354. {
  355. unsigned long addr, *unat;
  356. if (regnum >= IA64_FIRST_STACKED_GR) {
  357. get_rse_reg(regs, regnum, val, nat);
  358. return;
  359. }
  360. /*
  361. * Now look at registers in [0-31] range and init correct UNAT
  362. */
  363. addr = (unsigned long)regs;
  364. unat = &regs->eml_unat;
  365. addr += gr_info[regnum];
  366. *val = *(unsigned long *)addr;
  367. /*
  368. * do it only when requested
  369. */
  370. if (nat)
  371. *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
  372. }
  373. void setreg(unsigned long regnum, unsigned long val,
  374. int nat, struct kvm_pt_regs *regs)
  375. {
  376. unsigned long addr;
  377. unsigned long bitmask;
  378. unsigned long *unat;
  379. /*
  380. * First takes care of stacked registers
  381. */
  382. if (regnum >= IA64_FIRST_STACKED_GR) {
  383. set_rse_reg(regs, regnum, val, nat);
  384. return;
  385. }
  386. /*
  387. * Now look at registers in [0-31] range and init correct UNAT
  388. */
  389. addr = (unsigned long)regs;
  390. unat = &regs->eml_unat;
  391. /*
  392. * add offset from base of struct
  393. * and do it !
  394. */
  395. addr += gr_info[regnum];
  396. *(unsigned long *)addr = val;
  397. /*
  398. * We need to clear the corresponding UNAT bit to fully emulate the load
  399. * UNAT bit_pos = GR[r3]{8:3}, from EAS-2.4
  400. */
  401. bitmask = 1UL << ((addr >> 3) & 0x3f);
  402. if (nat)
  403. *unat |= bitmask;
  404. else
  405. *unat &= ~bitmask;
  406. }
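/*
 * Illustrative example: if the save slot of the register being written
 * lives at an address whose bits {8:3} are 0x2c, the bitmask above
 * selects eml_unat{0x2c}, matching the st8.spill/ld8.fill placement of
 * NaT bits that this emulation mimics.
 */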
  407. u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
  408. {
  409. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  410. u64 val;
  411. if (!reg)
  412. return 0;
  413. getreg(reg, &val, 0, regs);
  414. return val;
  415. }
  416. void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
  417. {
  418. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  419. long sof = (regs->cr_ifs) & 0x7f;
  420. if (!reg)
  421. return;
  422. if (reg >= sof + 32)
  423. return;
  424. setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/
  425. }
  426. void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
  427. struct kvm_pt_regs *regs)
  428. {
  429. /* Take floating register rotation into consideration*/
  430. if (regnum >= IA64_FIRST_ROTATING_FR)
  431. regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
  432. #define CASE_FIXED_FP(reg) \
  433. case (reg) : \
  434. ia64_stf_spill(fpval, reg); \
  435. break
  436. switch (regnum) {
  437. CASE_FIXED_FP(0);
  438. CASE_FIXED_FP(1);
  439. CASE_FIXED_FP(2);
  440. CASE_FIXED_FP(3);
  441. CASE_FIXED_FP(4);
  442. CASE_FIXED_FP(5);
  443. CASE_FIXED_FP(6);
  444. CASE_FIXED_FP(7);
  445. CASE_FIXED_FP(8);
  446. CASE_FIXED_FP(9);
  447. CASE_FIXED_FP(10);
  448. CASE_FIXED_FP(11);
  449. CASE_FIXED_FP(12);
  450. CASE_FIXED_FP(13);
  451. CASE_FIXED_FP(14);
  452. CASE_FIXED_FP(15);
  453. CASE_FIXED_FP(16);
  454. CASE_FIXED_FP(17);
  455. CASE_FIXED_FP(18);
  456. CASE_FIXED_FP(19);
  457. CASE_FIXED_FP(20);
  458. CASE_FIXED_FP(21);
  459. CASE_FIXED_FP(22);
  460. CASE_FIXED_FP(23);
  461. CASE_FIXED_FP(24);
  462. CASE_FIXED_FP(25);
  463. CASE_FIXED_FP(26);
  464. CASE_FIXED_FP(27);
  465. CASE_FIXED_FP(28);
  466. CASE_FIXED_FP(29);
  467. CASE_FIXED_FP(30);
  468. CASE_FIXED_FP(31);
  469. CASE_FIXED_FP(32);
  470. CASE_FIXED_FP(33);
  471. CASE_FIXED_FP(34);
  472. CASE_FIXED_FP(35);
  473. CASE_FIXED_FP(36);
  474. CASE_FIXED_FP(37);
  475. CASE_FIXED_FP(38);
  476. CASE_FIXED_FP(39);
  477. CASE_FIXED_FP(40);
  478. CASE_FIXED_FP(41);
  479. CASE_FIXED_FP(42);
  480. CASE_FIXED_FP(43);
  481. CASE_FIXED_FP(44);
  482. CASE_FIXED_FP(45);
  483. CASE_FIXED_FP(46);
  484. CASE_FIXED_FP(47);
  485. CASE_FIXED_FP(48);
  486. CASE_FIXED_FP(49);
  487. CASE_FIXED_FP(50);
  488. CASE_FIXED_FP(51);
  489. CASE_FIXED_FP(52);
  490. CASE_FIXED_FP(53);
  491. CASE_FIXED_FP(54);
  492. CASE_FIXED_FP(55);
  493. CASE_FIXED_FP(56);
  494. CASE_FIXED_FP(57);
  495. CASE_FIXED_FP(58);
  496. CASE_FIXED_FP(59);
  497. CASE_FIXED_FP(60);
  498. CASE_FIXED_FP(61);
  499. CASE_FIXED_FP(62);
  500. CASE_FIXED_FP(63);
  501. CASE_FIXED_FP(64);
  502. CASE_FIXED_FP(65);
  503. CASE_FIXED_FP(66);
  504. CASE_FIXED_FP(67);
  505. CASE_FIXED_FP(68);
  506. CASE_FIXED_FP(69);
  507. CASE_FIXED_FP(70);
  508. CASE_FIXED_FP(71);
  509. CASE_FIXED_FP(72);
  510. CASE_FIXED_FP(73);
  511. CASE_FIXED_FP(74);
  512. CASE_FIXED_FP(75);
  513. CASE_FIXED_FP(76);
  514. CASE_FIXED_FP(77);
  515. CASE_FIXED_FP(78);
  516. CASE_FIXED_FP(79);
  517. CASE_FIXED_FP(80);
  518. CASE_FIXED_FP(81);
  519. CASE_FIXED_FP(82);
  520. CASE_FIXED_FP(83);
  521. CASE_FIXED_FP(84);
  522. CASE_FIXED_FP(85);
  523. CASE_FIXED_FP(86);
  524. CASE_FIXED_FP(87);
  525. CASE_FIXED_FP(88);
  526. CASE_FIXED_FP(89);
  527. CASE_FIXED_FP(90);
  528. CASE_FIXED_FP(91);
  529. CASE_FIXED_FP(92);
  530. CASE_FIXED_FP(93);
  531. CASE_FIXED_FP(94);
  532. CASE_FIXED_FP(95);
  533. CASE_FIXED_FP(96);
  534. CASE_FIXED_FP(97);
  535. CASE_FIXED_FP(98);
  536. CASE_FIXED_FP(99);
  537. CASE_FIXED_FP(100);
  538. CASE_FIXED_FP(101);
  539. CASE_FIXED_FP(102);
  540. CASE_FIXED_FP(103);
  541. CASE_FIXED_FP(104);
  542. CASE_FIXED_FP(105);
  543. CASE_FIXED_FP(106);
  544. CASE_FIXED_FP(107);
  545. CASE_FIXED_FP(108);
  546. CASE_FIXED_FP(109);
  547. CASE_FIXED_FP(110);
  548. CASE_FIXED_FP(111);
  549. CASE_FIXED_FP(112);
  550. CASE_FIXED_FP(113);
  551. CASE_FIXED_FP(114);
  552. CASE_FIXED_FP(115);
  553. CASE_FIXED_FP(116);
  554. CASE_FIXED_FP(117);
  555. CASE_FIXED_FP(118);
  556. CASE_FIXED_FP(119);
  557. CASE_FIXED_FP(120);
  558. CASE_FIXED_FP(121);
  559. CASE_FIXED_FP(122);
  560. CASE_FIXED_FP(123);
  561. CASE_FIXED_FP(124);
  562. CASE_FIXED_FP(125);
  563. CASE_FIXED_FP(126);
  564. CASE_FIXED_FP(127);
  565. }
  566. #undef CASE_FIXED_FP
  567. }
  568. void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
  569. struct kvm_pt_regs *regs)
  570. {
  571. /* Take floating register rotation into consideration*/
  572. if (regnum >= IA64_FIRST_ROTATING_FR)
  573. regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
  574. #define CASE_FIXED_FP(reg) \
  575. case (reg) : \
  576. ia64_ldf_fill(reg, fpval); \
  577. break
  578. switch (regnum) {
  579. CASE_FIXED_FP(2);
  580. CASE_FIXED_FP(3);
  581. CASE_FIXED_FP(4);
  582. CASE_FIXED_FP(5);
  583. CASE_FIXED_FP(6);
  584. CASE_FIXED_FP(7);
  585. CASE_FIXED_FP(8);
  586. CASE_FIXED_FP(9);
  587. CASE_FIXED_FP(10);
  588. CASE_FIXED_FP(11);
  589. CASE_FIXED_FP(12);
  590. CASE_FIXED_FP(13);
  591. CASE_FIXED_FP(14);
  592. CASE_FIXED_FP(15);
  593. CASE_FIXED_FP(16);
  594. CASE_FIXED_FP(17);
  595. CASE_FIXED_FP(18);
  596. CASE_FIXED_FP(19);
  597. CASE_FIXED_FP(20);
  598. CASE_FIXED_FP(21);
  599. CASE_FIXED_FP(22);
  600. CASE_FIXED_FP(23);
  601. CASE_FIXED_FP(24);
  602. CASE_FIXED_FP(25);
  603. CASE_FIXED_FP(26);
  604. CASE_FIXED_FP(27);
  605. CASE_FIXED_FP(28);
  606. CASE_FIXED_FP(29);
  607. CASE_FIXED_FP(30);
  608. CASE_FIXED_FP(31);
  609. CASE_FIXED_FP(32);
  610. CASE_FIXED_FP(33);
  611. CASE_FIXED_FP(34);
  612. CASE_FIXED_FP(35);
  613. CASE_FIXED_FP(36);
  614. CASE_FIXED_FP(37);
  615. CASE_FIXED_FP(38);
  616. CASE_FIXED_FP(39);
  617. CASE_FIXED_FP(40);
  618. CASE_FIXED_FP(41);
  619. CASE_FIXED_FP(42);
  620. CASE_FIXED_FP(43);
  621. CASE_FIXED_FP(44);
  622. CASE_FIXED_FP(45);
  623. CASE_FIXED_FP(46);
  624. CASE_FIXED_FP(47);
  625. CASE_FIXED_FP(48);
  626. CASE_FIXED_FP(49);
  627. CASE_FIXED_FP(50);
  628. CASE_FIXED_FP(51);
  629. CASE_FIXED_FP(52);
  630. CASE_FIXED_FP(53);
  631. CASE_FIXED_FP(54);
  632. CASE_FIXED_FP(55);
  633. CASE_FIXED_FP(56);
  634. CASE_FIXED_FP(57);
  635. CASE_FIXED_FP(58);
  636. CASE_FIXED_FP(59);
  637. CASE_FIXED_FP(60);
  638. CASE_FIXED_FP(61);
  639. CASE_FIXED_FP(62);
  640. CASE_FIXED_FP(63);
  641. CASE_FIXED_FP(64);
  642. CASE_FIXED_FP(65);
  643. CASE_FIXED_FP(66);
  644. CASE_FIXED_FP(67);
  645. CASE_FIXED_FP(68);
  646. CASE_FIXED_FP(69);
  647. CASE_FIXED_FP(70);
  648. CASE_FIXED_FP(71);
  649. CASE_FIXED_FP(72);
  650. CASE_FIXED_FP(73);
  651. CASE_FIXED_FP(74);
  652. CASE_FIXED_FP(75);
  653. CASE_FIXED_FP(76);
  654. CASE_FIXED_FP(77);
  655. CASE_FIXED_FP(78);
  656. CASE_FIXED_FP(79);
  657. CASE_FIXED_FP(80);
  658. CASE_FIXED_FP(81);
  659. CASE_FIXED_FP(82);
  660. CASE_FIXED_FP(83);
  661. CASE_FIXED_FP(84);
  662. CASE_FIXED_FP(85);
  663. CASE_FIXED_FP(86);
  664. CASE_FIXED_FP(87);
  665. CASE_FIXED_FP(88);
  666. CASE_FIXED_FP(89);
  667. CASE_FIXED_FP(90);
  668. CASE_FIXED_FP(91);
  669. CASE_FIXED_FP(92);
  670. CASE_FIXED_FP(93);
  671. CASE_FIXED_FP(94);
  672. CASE_FIXED_FP(95);
  673. CASE_FIXED_FP(96);
  674. CASE_FIXED_FP(97);
  675. CASE_FIXED_FP(98);
  676. CASE_FIXED_FP(99);
  677. CASE_FIXED_FP(100);
  678. CASE_FIXED_FP(101);
  679. CASE_FIXED_FP(102);
  680. CASE_FIXED_FP(103);
  681. CASE_FIXED_FP(104);
  682. CASE_FIXED_FP(105);
  683. CASE_FIXED_FP(106);
  684. CASE_FIXED_FP(107);
  685. CASE_FIXED_FP(108);
  686. CASE_FIXED_FP(109);
  687. CASE_FIXED_FP(110);
  688. CASE_FIXED_FP(111);
  689. CASE_FIXED_FP(112);
  690. CASE_FIXED_FP(113);
  691. CASE_FIXED_FP(114);
  692. CASE_FIXED_FP(115);
  693. CASE_FIXED_FP(116);
  694. CASE_FIXED_FP(117);
  695. CASE_FIXED_FP(118);
  696. CASE_FIXED_FP(119);
  697. CASE_FIXED_FP(120);
  698. CASE_FIXED_FP(121);
  699. CASE_FIXED_FP(122);
  700. CASE_FIXED_FP(123);
  701. CASE_FIXED_FP(124);
  702. CASE_FIXED_FP(125);
  703. CASE_FIXED_FP(126);
  704. CASE_FIXED_FP(127);
  705. }
  706. }
  707. void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
  708. struct ia64_fpreg *val)
  709. {
  710. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  711. getfpreg(reg, val, regs); /* FIXME: handle NATs later*/
  712. }
  713. void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
  714. struct ia64_fpreg *val)
  715. {
  716. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  717. if (reg > 1)
  718. setfpreg(reg, val, regs); /* FIXME: handle NATs later*/
  719. }
  720. /************************************************************************
  721. * lsapic timer
  722. ***********************************************************************/
  723. u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
  724. {
  725. unsigned long guest_itc;
  726. guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
  727. if (guest_itc >= VMX(vcpu, last_itc)) {
  728. VMX(vcpu, last_itc) = guest_itc;
  729. return guest_itc;
  730. } else
  731. return VMX(vcpu, last_itc);
  732. }
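/*
 * Illustrative numbers: with itc_offset = -1000 and a host ar.itc
 * reading of 5000, the guest sees 4000 -- unless an earlier read
 * already returned something larger, in which case last_itc is handed
 * back so the guest ITC never appears to move backwards.
 */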
  733. static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
  734. static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
  735. {
  736. struct kvm_vcpu *v;
  737. int i;
  738. long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
  739. unsigned long vitv = VCPU(vcpu, itv);
  740. if (vcpu->vcpu_id == 0) {
  741. for (i = 0; i < KVM_MAX_VCPUS; i++) {
  742. v = (struct kvm_vcpu *)((char *)vcpu +
  743. sizeof(struct kvm_vcpu_data) * i);
  744. VMX(v, itc_offset) = itc_offset;
  745. VMX(v, last_itc) = 0;
  746. }
  747. }
  748. VMX(vcpu, last_itc) = 0;
  749. if (VCPU(vcpu, itm) <= val) {
  750. VMX(vcpu, itc_check) = 0;
  751. vcpu_unpend_interrupt(vcpu, vitv);
  752. } else {
  753. VMX(vcpu, itc_check) = 1;
  754. vcpu_set_itm(vcpu, VCPU(vcpu, itm));
  755. }
  756. }
  757. static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
  758. {
  759. return ((u64)VCPU(vcpu, itm));
  760. }
  761. static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
  762. {
  763. unsigned long vitv = VCPU(vcpu, itv);
  764. VCPU(vcpu, itm) = val;
  765. if (val > vcpu_get_itc(vcpu)) {
  766. VMX(vcpu, itc_check) = 1;
  767. vcpu_unpend_interrupt(vcpu, vitv);
  768. VMX(vcpu, timer_pending) = 0;
  769. } else
  770. VMX(vcpu, itc_check) = 0;
  771. }
  772. #define ITV_VECTOR(itv) (itv&0xff)
  773. #define ITV_IRQ_MASK(itv) (itv&(1<<16))
  774. static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
  775. {
  776. VCPU(vcpu, itv) = val;
  777. if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
  778. vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
  779. vcpu->arch.timer_pending = 0;
  780. }
  781. }
  782. static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
  783. {
  784. int vec;
  785. vec = highest_inservice_irq(vcpu);
  786. if (vec == NULL_VECTOR)
  787. return;
  788. VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
  789. VCPU(vcpu, eoi) = 0;
  790. vcpu->arch.irq_new_pending = 1;
  791. }
  792. /* See Table 5-8 in SDM vol2 for the definition */
  793. int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
  794. {
  795. union ia64_tpr vtpr;
  796. vtpr.val = VCPU(vcpu, tpr);
  797. if (h_inservice == NMI_VECTOR)
  798. return IRQ_MASKED_BY_INSVC;
  799. if (h_pending == NMI_VECTOR) {
  800. /* Non Maskable Interrupt */
  801. return IRQ_NO_MASKED;
  802. }
  803. if (h_inservice == ExtINT_VECTOR)
  804. return IRQ_MASKED_BY_INSVC;
  805. if (h_pending == ExtINT_VECTOR) {
  806. if (vtpr.mmi) {
  807. /* mask all external IRQ */
  808. return IRQ_MASKED_BY_VTPR;
  809. } else
  810. return IRQ_NO_MASKED;
  811. }
  812. if (is_higher_irq(h_pending, h_inservice)) {
  813. if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
  814. return IRQ_NO_MASKED;
  815. else
  816. return IRQ_MASKED_BY_VTPR;
  817. } else {
  818. return IRQ_MASKED_BY_INSVC;
  819. }
  820. }
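/*
 * Worked example (assuming is_higher_class() compares the 4-bit
 * priority class, i.e. vector >> 4): with vtpr.mic = 3 and
 * vtpr.mmi = 0, a pending vector 0x61 (class 6) beats an in-service
 * vector 0x35 (class 3) and is reported IRQ_NO_MASKED, while a
 * pending vector 0x3a (also class 3) is reported IRQ_MASKED_BY_VTPR.
 */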
  821. void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
  822. {
  823. long spsr;
  824. int ret;
  825. local_irq_save(spsr);
  826. ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
  827. local_irq_restore(spsr);
  828. vcpu->arch.irq_new_pending = 1;
  829. }
  830. void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
  831. {
  832. long spsr;
  833. int ret;
  834. local_irq_save(spsr);
  835. ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
  836. local_irq_restore(spsr);
  837. if (ret) {
  838. vcpu->arch.irq_new_pending = 1;
  839. wmb();
  840. }
  841. }
  842. void update_vhpi(struct kvm_vcpu *vcpu, int vec)
  843. {
  844. u64 vhpi;
  845. if (vec == NULL_VECTOR)
  846. vhpi = 0;
  847. else if (vec == NMI_VECTOR)
  848. vhpi = 32;
  849. else if (vec == ExtINT_VECTOR)
  850. vhpi = 16;
  851. else
  852. vhpi = vec >> 4;
  853. VCPU(vcpu, vhpi) = vhpi;
  854. if (VCPU(vcpu, vac).a_int)
  855. ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
  856. (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
  857. }
  858. u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
  859. {
  860. int vec, h_inservice, mask;
  861. vec = highest_pending_irq(vcpu);
  862. h_inservice = highest_inservice_irq(vcpu);
  863. mask = irq_masked(vcpu, vec, h_inservice);
  864. if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
  865. if (VCPU(vcpu, vhpi))
  866. update_vhpi(vcpu, NULL_VECTOR);
  867. return IA64_SPURIOUS_INT_VECTOR;
  868. }
  869. if (mask == IRQ_MASKED_BY_VTPR) {
  870. update_vhpi(vcpu, vec);
  871. return IA64_SPURIOUS_INT_VECTOR;
  872. }
  873. VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
  874. vcpu_unpend_interrupt(vcpu, vec);
  875. return (u64)vec;
  876. }
  877. /**************************************************************************
  878. Privileged operation emulation routines
  879. **************************************************************************/
  880. u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
  881. {
  882. union ia64_pta vpta;
  883. union ia64_rr vrr;
  884. u64 pval;
  885. u64 vhpt_offset;
  886. vpta.val = vcpu_get_pta(vcpu);
  887. vrr.val = vcpu_get_rr(vcpu, vadr);
  888. vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
  889. if (vpta.vf) {
  890. pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
  891. vpta.val, 0, 0, 0, 0);
  892. } else {
  893. pval = (vadr & VRN_MASK) | vhpt_offset |
  894. (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
  895. }
  896. return pval;
  897. }
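/*
 * Reading of the non-VF case above (a sketch of what the formula
 * computes): the returned hash is the region bits of vadr, OR'ed with
 * the offset of the selected 8-byte entry inside the 2^vpta.size
 * table, OR'ed with the VHPT base taken from the PTA above the table
 * size -- i.e. a short-format, per-region VHPT hash.
 */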
  898. u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
  899. {
  900. union ia64_rr vrr;
  901. union ia64_pta vpta;
  902. u64 pval;
  903. vpta.val = vcpu_get_pta(vcpu);
  904. vrr.val = vcpu_get_rr(vcpu, vadr);
  905. if (vpta.vf) {
  906. pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
  907. 0, 0, 0, 0, 0);
  908. } else
  909. pval = 1;
  910. return pval;
  911. }
  912. u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
  913. {
  914. struct thash_data *data;
  915. union ia64_pta vpta;
  916. u64 key;
  917. vpta.val = vcpu_get_pta(vcpu);
  918. if (vpta.vf == 0) {
  919. key = 1;
  920. return key;
  921. }
  922. data = vtlb_lookup(vcpu, vadr, D_TLB);
  923. if (!data || !data->p)
  924. key = 1;
  925. else
  926. key = data->key;
  927. return key;
  928. }
  929. void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
  930. {
  931. unsigned long thash, vadr;
  932. vadr = vcpu_get_gr(vcpu, inst.M46.r3);
  933. thash = vcpu_thash(vcpu, vadr);
  934. vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
  935. }
  936. void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
  937. {
  938. unsigned long tag, vadr;
  939. vadr = vcpu_get_gr(vcpu, inst.M46.r3);
  940. tag = vcpu_ttag(vcpu, vadr);
  941. vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
  942. }
  943. int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
  944. {
  945. struct thash_data *data;
  946. union ia64_isr visr, pt_isr;
  947. struct kvm_pt_regs *regs;
  948. struct ia64_psr vpsr;
  949. regs = vcpu_regs(vcpu);
  950. pt_isr.val = VMX(vcpu, cr_isr);
  951. visr.val = 0;
  952. visr.ei = pt_isr.ei;
  953. visr.ir = pt_isr.ir;
  954. vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
  955. visr.na = 1;
  956. data = vhpt_lookup(vadr);
  957. if (data) {
  958. if (data->p == 0) {
  959. vcpu_set_isr(vcpu, visr.val);
  960. data_page_not_present(vcpu, vadr);
  961. return IA64_FAULT;
  962. } else if (data->ma == VA_MATTR_NATPAGE) {
  963. vcpu_set_isr(vcpu, visr.val);
  964. dnat_page_consumption(vcpu, vadr);
  965. return IA64_FAULT;
  966. } else {
  967. *padr = (data->gpaddr >> data->ps << data->ps) |
  968. (vadr & (PSIZE(data->ps) - 1));
  969. return IA64_NO_FAULT;
  970. }
  971. }
  972. data = vtlb_lookup(vcpu, vadr, D_TLB);
  973. if (data) {
  974. if (data->p == 0) {
  975. vcpu_set_isr(vcpu, visr.val);
  976. data_page_not_present(vcpu, vadr);
  977. return IA64_FAULT;
  978. } else if (data->ma == VA_MATTR_NATPAGE) {
  979. vcpu_set_isr(vcpu, visr.val);
  980. dnat_page_consumption(vcpu, vadr);
  981. return IA64_FAULT;
  982. } else {
  983. *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
  984. | (vadr & (PSIZE(data->ps) - 1));
  985. return IA64_NO_FAULT;
  986. }
  987. }
  988. if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
  989. if (vpsr.ic) {
  990. vcpu_set_isr(vcpu, visr.val);
  991. alt_dtlb(vcpu, vadr);
  992. return IA64_FAULT;
  993. } else {
  994. nested_dtlb(vcpu);
  995. return IA64_FAULT;
  996. }
  997. } else {
  998. if (vpsr.ic) {
  999. vcpu_set_isr(vcpu, visr.val);
  1000. dvhpt_fault(vcpu, vadr);
  1001. return IA64_FAULT;
  1002. } else {
  1003. nested_dtlb(vcpu);
  1004. return IA64_FAULT;
  1005. }
  1006. }
  1007. return IA64_NO_FAULT;
  1008. }
  1009. int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
  1010. {
  1011. unsigned long r1, r3;
  1012. r3 = vcpu_get_gr(vcpu, inst.M46.r3);
  1013. if (vcpu_tpa(vcpu, r3, &r1))
  1014. return IA64_FAULT;
  1015. vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  1016. return(IA64_NO_FAULT);
  1017. }
  1018. void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
  1019. {
  1020. unsigned long r1, r3;
  1021. r3 = vcpu_get_gr(vcpu, inst.M46.r3);
  1022. r1 = vcpu_tak(vcpu, r3);
  1023. vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  1024. }
  1025. /************************************
  1026. * Insert/Purge translation register/cache
  1027. ************************************/
  1028. void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
  1029. {
  1030. thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
  1031. }
  1032. void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
  1033. {
  1034. thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
  1035. }
  1036. void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
  1037. {
  1038. u64 ps, va, rid;
  1039. struct thash_data *p_itr;
  1040. ps = itir_ps(itir);
  1041. va = PAGEALIGN(ifa, ps);
  1042. pte &= ~PAGE_FLAGS_RV_MASK;
  1043. rid = vcpu_get_rr(vcpu, ifa);
  1044. rid = rid & RR_RID_MASK;
  1045. p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
  1046. vcpu_set_tr(p_itr, pte, itir, va, rid);
  1047. vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
  1048. }
  1049. void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
  1050. {
  1051. u64 gpfn;
  1052. u64 ps, va, rid;
  1053. struct thash_data *p_dtr;
  1054. ps = itir_ps(itir);
  1055. va = PAGEALIGN(ifa, ps);
  1056. pte &= ~PAGE_FLAGS_RV_MASK;
  1057. if (ps != _PAGE_SIZE_16M)
  1058. thash_purge_entries(vcpu, va, ps);
  1059. gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
  1060. if (__gpfn_is_io(gpfn))
  1061. pte |= VTLB_PTE_IO;
  1062. rid = vcpu_get_rr(vcpu, va);
  1063. rid = rid & RR_RID_MASK;
  1064. p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
  1065. vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
  1066. pte, itir, va, rid);
  1067. vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
  1068. }
  1069. void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
  1070. {
  1071. int index;
  1072. u64 va;
  1073. va = PAGEALIGN(ifa, ps);
  1074. while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
  1075. vcpu->arch.dtrs[index].page_flags = 0;
  1076. thash_purge_entries(vcpu, va, ps);
  1077. }
  1078. void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
  1079. {
  1080. int index;
  1081. u64 va;
  1082. va = PAGEALIGN(ifa, ps);
  1083. while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
  1084. vcpu->arch.itrs[index].page_flags = 0;
  1085. thash_purge_entries(vcpu, va, ps);
  1086. }
  1087. void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
  1088. {
  1089. va = PAGEALIGN(va, ps);
  1090. thash_purge_entries(vcpu, va, ps);
  1091. }
  1092. void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
  1093. {
  1094. thash_purge_all(vcpu);
  1095. }
  1096. void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
  1097. {
  1098. struct exit_ctl_data *p = &vcpu->arch.exit_data;
  1099. long psr;
  1100. local_irq_save(psr);
  1101. p->exit_reason = EXIT_REASON_PTC_G;
  1102. p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
  1103. p->u.ptc_g_data.vaddr = va;
  1104. p->u.ptc_g_data.ps = ps;
  1105. vmm_transition(vcpu);
  1106. /* Do Local Purge Here*/
  1107. vcpu_ptc_l(vcpu, va, ps);
  1108. local_irq_restore(psr);
  1109. }
  1110. void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
  1111. {
  1112. vcpu_ptc_ga(vcpu, va, ps);
  1113. }
  1114. void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
  1115. {
  1116. unsigned long ifa;
  1117. ifa = vcpu_get_gr(vcpu, inst.M45.r3);
  1118. vcpu_ptc_e(vcpu, ifa);
  1119. }
  1120. void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
  1121. {
  1122. unsigned long ifa, itir;
  1123. ifa = vcpu_get_gr(vcpu, inst.M45.r3);
  1124. itir = vcpu_get_gr(vcpu, inst.M45.r2);
  1125. vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
  1126. }
  1127. void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
  1128. {
  1129. unsigned long ifa, itir;
  1130. ifa = vcpu_get_gr(vcpu, inst.M45.r3);
  1131. itir = vcpu_get_gr(vcpu, inst.M45.r2);
  1132. vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
  1133. }
  1134. void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
  1135. {
  1136. unsigned long ifa, itir;
  1137. ifa = vcpu_get_gr(vcpu, inst.M45.r3);
  1138. itir = vcpu_get_gr(vcpu, inst.M45.r2);
  1139. vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
  1140. }
  1141. void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
  1142. {
  1143. unsigned long ifa, itir;
  1144. ifa = vcpu_get_gr(vcpu, inst.M45.r3);
  1145. itir = vcpu_get_gr(vcpu, inst.M45.r2);
  1146. vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
  1147. }
  1148. void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
  1149. {
  1150. unsigned long ifa, itir;
  1151. ifa = vcpu_get_gr(vcpu, inst.M45.r3);
  1152. itir = vcpu_get_gr(vcpu, inst.M45.r2);
  1153. vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
  1154. }
  1155. void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
  1156. {
  1157. unsigned long itir, ifa, pte, slot;
  1158. slot = vcpu_get_gr(vcpu, inst.M45.r3);
  1159. pte = vcpu_get_gr(vcpu, inst.M45.r2);
  1160. itir = vcpu_get_itir(vcpu);
  1161. ifa = vcpu_get_ifa(vcpu);
  1162. vcpu_itr_d(vcpu, slot, pte, itir, ifa);
  1163. }
  1164. void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
  1165. {
  1166. unsigned long itir, ifa, pte, slot;
  1167. slot = vcpu_get_gr(vcpu, inst.M45.r3);
  1168. pte = vcpu_get_gr(vcpu, inst.M45.r2);
  1169. itir = vcpu_get_itir(vcpu);
  1170. ifa = vcpu_get_ifa(vcpu);
  1171. vcpu_itr_i(vcpu, slot, pte, itir, ifa);
  1172. }
  1173. void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
  1174. {
  1175. unsigned long itir, ifa, pte;
  1176. itir = vcpu_get_itir(vcpu);
  1177. ifa = vcpu_get_ifa(vcpu);
  1178. pte = vcpu_get_gr(vcpu, inst.M45.r2);
  1179. vcpu_itc_d(vcpu, pte, itir, ifa);
  1180. }
  1181. void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
  1182. {
  1183. unsigned long itir, ifa, pte;
  1184. itir = vcpu_get_itir(vcpu);
  1185. ifa = vcpu_get_ifa(vcpu);
  1186. pte = vcpu_get_gr(vcpu, inst.M45.r2);
  1187. vcpu_itc_i(vcpu, pte, itir, ifa);
  1188. }
  1189. /*************************************
  1190. * Moves to semi-privileged registers
  1191. *************************************/
  1192. void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
  1193. {
  1194. unsigned long imm;
  1195. if (inst.M30.s)
  1196. imm = -inst.M30.imm;
  1197. else
  1198. imm = inst.M30.imm;
  1199. vcpu_set_itc(vcpu, imm);
  1200. }
  1201. void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
  1202. {
  1203. unsigned long r2;
  1204. r2 = vcpu_get_gr(vcpu, inst.M29.r2);
  1205. vcpu_set_itc(vcpu, r2);
  1206. }
  1207. void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
  1208. {
  1209. unsigned long r1;
  1210. r1 = vcpu_get_itc(vcpu);
  1211. vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
  1212. }
  1213. /**************************************************************************
  1214. struct kvm_vcpu protection key register access routines
  1215. **************************************************************************/
  1216. unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
  1217. {
  1218. return ((unsigned long)ia64_get_pkr(reg));
  1219. }
  1220. void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
  1221. {
  1222. ia64_set_pkr(reg, val);
  1223. }
  1224. /********************************
  1225. * Moves to privileged registers
  1226. ********************************/
  1227. unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
  1228. unsigned long val)
  1229. {
  1230. union ia64_rr oldrr, newrr;
  1231. unsigned long rrval;
  1232. struct exit_ctl_data *p = &vcpu->arch.exit_data;
  1233. unsigned long psr;
  1234. oldrr.val = vcpu_get_rr(vcpu, reg);
  1235. newrr.val = val;
  1236. vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
  1237. switch ((unsigned long)(reg >> VRN_SHIFT)) {
  1238. case VRN6:
  1239. vcpu->arch.vmm_rr = vrrtomrr(val);
  1240. local_irq_save(psr);
  1241. p->exit_reason = EXIT_REASON_SWITCH_RR6;
  1242. vmm_transition(vcpu);
  1243. local_irq_restore(psr);
  1244. break;
  1245. case VRN4:
  1246. rrval = vrrtomrr(val);
  1247. vcpu->arch.metaphysical_saved_rr4 = rrval;
  1248. if (!is_physical_mode(vcpu))
  1249. ia64_set_rr(reg, rrval);
  1250. break;
  1251. case VRN0:
  1252. rrval = vrrtomrr(val);
  1253. vcpu->arch.metaphysical_saved_rr0 = rrval;
  1254. if (!is_physical_mode(vcpu))
  1255. ia64_set_rr(reg, rrval);
  1256. break;
  1257. default:
  1258. ia64_set_rr(reg, vrrtomrr(val));
  1259. break;
  1260. }
  1261. return (IA64_NO_FAULT);
  1262. }
  1263. void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
  1264. {
  1265. unsigned long r3, r2;
  1266. r3 = vcpu_get_gr(vcpu, inst.M42.r3);
  1267. r2 = vcpu_get_gr(vcpu, inst.M42.r2);
  1268. vcpu_set_rr(vcpu, r3, r2);
  1269. }
  1270. void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
  1271. {
  1272. }
  1273. void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
  1274. {
  1275. }
  1276. void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
  1277. {
  1278. unsigned long r3, r2;
  1279. r3 = vcpu_get_gr(vcpu, inst.M42.r3);
  1280. r2 = vcpu_get_gr(vcpu, inst.M42.r2);
  1281. vcpu_set_pmc(vcpu, r3, r2);
  1282. }
  1283. void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
  1284. {
  1285. unsigned long r3, r2;
  1286. r3 = vcpu_get_gr(vcpu, inst.M42.r3);
  1287. r2 = vcpu_get_gr(vcpu, inst.M42.r2);
  1288. vcpu_set_pmd(vcpu, r3, r2);
  1289. }
  1290. void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
  1291. {
  1292. u64 r3, r2;
  1293. r3 = vcpu_get_gr(vcpu, inst.M42.r3);
  1294. r2 = vcpu_get_gr(vcpu, inst.M42.r2);
  1295. vcpu_set_pkr(vcpu, r3, r2);
  1296. }
  1297. void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
  1298. {
  1299. unsigned long r3, r1;
  1300. r3 = vcpu_get_gr(vcpu, inst.M43.r3);
  1301. r1 = vcpu_get_rr(vcpu, r3);
  1302. vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
  1303. }
  1304. void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
  1305. {
  1306. unsigned long r3, r1;
  1307. r3 = vcpu_get_gr(vcpu, inst.M43.r3);
  1308. r1 = vcpu_get_pkr(vcpu, r3);
  1309. vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
  1310. }
  1311. void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
  1312. {
  1313. unsigned long r3, r1;
  1314. r3 = vcpu_get_gr(vcpu, inst.M43.r3);
  1315. r1 = vcpu_get_dbr(vcpu, r3);
  1316. vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
  1317. }
  1318. void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
  1319. {
  1320. unsigned long r3, r1;
  1321. r3 = vcpu_get_gr(vcpu, inst.M43.r3);
  1322. r1 = vcpu_get_ibr(vcpu, r3);
  1323. vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
  1324. }
  1325. void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
  1326. {
  1327. unsigned long r3, r1;
  1328. r3 = vcpu_get_gr(vcpu, inst.M43.r3);
  1329. r1 = vcpu_get_pmc(vcpu, r3);
  1330. vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
  1331. }
  1332. unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
  1333. {
  1334. /* FIXME: This could get called as a result of a rsvd-reg fault */
  1335. if (reg > (ia64_get_cpuid(3) & 0xff))
  1336. return 0;
  1337. else
  1338. return ia64_get_cpuid(reg);
  1339. }
  1340. void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
  1341. {
  1342. unsigned long r3, r1;
  1343. r3 = vcpu_get_gr(vcpu, inst.M43.r3);
  1344. r1 = vcpu_get_cpuid(vcpu, r3);
  1345. vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
  1346. }
  1347. void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
  1348. {
  1349. VCPU(vcpu, tpr) = val;
  1350. vcpu->arch.irq_check = 1;
  1351. }
  1352. unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
  1353. {
  1354. unsigned long r2;
  1355. r2 = vcpu_get_gr(vcpu, inst.M32.r2);
  1356. VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
  1357. switch (inst.M32.cr3) {
  1358. case 0:
  1359. vcpu_set_dcr(vcpu, r2);
  1360. break;
  1361. case 1:
  1362. vcpu_set_itm(vcpu, r2);
  1363. break;
  1364. case 66:
  1365. vcpu_set_tpr(vcpu, r2);
  1366. break;
  1367. case 67:
  1368. vcpu_set_eoi(vcpu, r2);
  1369. break;
  1370. default:
  1371. break;
  1372. }
  1373. return 0;
  1374. }
  1375. unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
  1376. {
  1377. unsigned long tgt = inst.M33.r1;
  1378. unsigned long val;
  1379. switch (inst.M33.cr3) {
  1380. case 65:
  1381. val = vcpu_get_ivr(vcpu);
  1382. vcpu_set_gr(vcpu, tgt, val, 0);
  1383. break;
  1384. case 67:
  1385. vcpu_set_gr(vcpu, tgt, 0L, 0);
  1386. break;
  1387. default:
  1388. val = VCPU(vcpu, vcr[inst.M33.cr3]);
  1389. vcpu_set_gr(vcpu, tgt, val, 0);
  1390. break;
  1391. }
  1392. return 0;
  1393. }
  1394. void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
  1395. {
  1396. unsigned long mask;
  1397. struct kvm_pt_regs *regs;
  1398. struct ia64_psr old_psr, new_psr;
  1399. old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
  1400. regs = vcpu_regs(vcpu);
  1401. /* We only support guests running with:
  1402. * vpsr.pk = 0
  1403. * vpsr.is = 0
  1404. * Otherwise panic
  1405. */
  1406. if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
  1407. panic_vm(vcpu, "Only support guests with vpsr.pk =0 \
  1408. & vpsr.is=0\n");
  1409. /*
  1410. * For those IA64_PSR bits: id/da/dd/ss/ed/ia
  1411. * Since these bits will become 0 after successful execution of each
  1412. * instruction, we do not keep them in vpsr and instead set them in mIA64_PSR.
  1413. */
  1414. VCPU(vcpu, vpsr) = val
  1415. & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
  1416. IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
  1417. if (!old_psr.i && (val & IA64_PSR_I)) {
  1418. /* vpsr.i 0->1 */
  1419. vcpu->arch.irq_check = 1;
  1420. }
  1421. new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
  1422. /*
  1423. * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
  1424. * except for the following bits:
  1425. * ic/i/dt/si/rt/mc/it/bn/vm
  1426. */
  1427. mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
  1428. IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
  1429. IA64_PSR_VM;
  1430. regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
  1431. check_mm_mode_switch(vcpu, old_psr, new_psr);
  1432. return;
  1433. }
  1434. unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
  1435. {
  1436. struct ia64_psr vpsr;
  1437. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  1438. vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
  1439. if (!vpsr.ic)
  1440. VCPU(vcpu, ifs) = regs->cr_ifs;
  1441. regs->cr_ifs = IA64_IFS_V;
  1442. return (IA64_NO_FAULT);
  1443. }
  1444. /**************************************************************************
  1445. VCPU banked general register access routines
  1446. **************************************************************************/
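/*
 * The vcpu_bsw0_unat()/vcpu_bsw1_unat() helpers below keep the NaT
 * bits for r16-r31 in step with the register copies done in
 * vcpu_bsw0()/vcpu_bsw1(): in essence, the relevant 16 bits are moved
 * between the machine UNAT (regs->eml_unat) and the saved bank NaT
 * words (vbnat/vnat).
 */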
  1447. #define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
  1448. do { \
  1449. __asm__ __volatile__ ( \
  1450. ";;extr.u %0 = %3,%6,16;;\n" \
  1451. "dep %1 = %0, %1, 0, 16;;\n" \
  1452. "st8 [%4] = %1\n" \
  1453. "extr.u %0 = %2, 16, 16;;\n" \
  1454. "dep %3 = %0, %3, %6, 16;;\n" \
  1455. "st8 [%5] = %3\n" \
  1456. ::"r"(i), "r"(*b1unat), "r"(*b0unat), \
  1457. "r"(*runat), "r"(b1unat), "r"(runat), \
  1458. "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
  1459. } while (0)
  1460. void vcpu_bsw0(struct kvm_vcpu *vcpu)
  1461. {
  1462. unsigned long i;
  1463. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  1464. unsigned long *r = &regs->r16;
  1465. unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
  1466. unsigned long *b1 = &VCPU(vcpu, vgr[0]);
  1467. unsigned long *runat = &regs->eml_unat;
  1468. unsigned long *b0unat = &VCPU(vcpu, vbnat);
  1469. unsigned long *b1unat = &VCPU(vcpu, vnat);
  1470. if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
  1471. for (i = 0; i < 16; i++) {
  1472. *b1++ = *r;
  1473. *r++ = *b0++;
  1474. }
  1475. vcpu_bsw0_unat(i, b0unat, b1unat, runat,
  1476. VMM_PT_REGS_R16_SLOT);
  1477. VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
  1478. }
  1479. }
  1480. #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
  1481. do { \
  1482. __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
  1483. "dep %1 = %0, %1, 16, 16;;\n" \
  1484. "st8 [%4] = %1\n" \
  1485. "extr.u %0 = %2, 0, 16;;\n" \
  1486. "dep %3 = %0, %3, %6, 16;;\n" \
  1487. "st8 [%5] = %3\n" \
  1488. ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
  1489. "r"(*runat), "r"(b0unat), "r"(runat), \
  1490. "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
  1491. } while (0)
void vcpu_bsw1(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
		for (i = 0; i < 16; i++) {
			*b0++ = *r;
			*r++ = *b1++;
		}
		vcpu_bsw1_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) |= IA64_PSR_BN;
	}
}
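
/*
 * Emulate rfi: restore the guest PSR from its saved IPSR (switching
 * register banks to match psr.bn), restore cr.ifs if the saved IFS is
 * valid, and resume at the guest's saved IIP.
 */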
void vcpu_rfi(struct kvm_vcpu *vcpu)
{
	unsigned long ifs, psr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	psr = VCPU(vcpu, ipsr);
	if (psr & IA64_PSR_BN)
		vcpu_bsw1(vcpu);
	else
		vcpu_bsw0(vcpu);
	vcpu_set_psr(vcpu, psr);
	ifs = VCPU(vcpu, ifs);
	if (ifs >> 63)
		regs->cr_ifs = ifs;
	regs->cr_iip = VCPU(vcpu, iip);
}
/*
 * The virtual PSR (vpsr) cannot track the bits masked out below; those
 * are kept in the machine cr.ipsr instead.  This function reassembles
 * the complete guest PSR from both sources.
 */
unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
{
	unsigned long mask;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
		IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
	return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}
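
/*
 * Emulate "rsm imm24" (reset system mask): clear the guest PSR bits
 * selected by the 24-bit immediate assembled from the M44 fields.
 */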
void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vpsr;
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
					| inst.M44.imm;

	vpsr = vcpu_get_psr(vcpu);
	vpsr &= (~imm24);
	vcpu_set_psr(vcpu, vpsr);
}
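
/*
 * Emulate "ssm imm24" (set system mask): set the guest PSR bits
 * selected by the 24-bit immediate.
 */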
void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vpsr;
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
					| inst.M44.imm;

	vpsr = vcpu_get_psr(vcpu);
	vpsr |= imm24;
	vcpu_set_psr(vcpu, vpsr);
}
/* Generate Mask
 * Parameter:
 *   bit -- starting bit
 *   len -- how many bits
 */
#define MASK(bit,len)						\
({								\
		__u64 ret;					\
								\
		__asm __volatile("dep %0=-1, r0, %1, %2"	\
				: "=r" (ret):			\
				"M" (bit),			\
				"M" (len));			\
		ret;						\
})
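
/*
 * Write only the lower 32 bits of the guest PSR (psr.l); the upper
 * half is preserved from the current guest PSR.
 */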
void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
{
	val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
	vcpu_set_psr(vcpu, val);
}
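
/* Emulate "mov psr.l = r2": take the new psr.l value from GR[r2]. */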
void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_gr(vcpu, inst.M35.r2);
	vcpu_set_psr_l(vcpu, val);
}
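
/*
 * Emulate "mov r1 = psr": only psr{31:0} and psr{36:35} are returned
 * to the guest.
 */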
void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_psr(vcpu);
	val = (val & MASK(0, 32)) | (val & MASK(35, 2));
	vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
}
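
/*
 * Advance the guest instruction pointer by one slot: psr.ri cycles
 * through slots 0-2, and cr.iip moves to the next 16-byte bundle after
 * the last slot.
 */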
void vcpu_increment_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	if (ipsr->ri == 2) {
		ipsr->ri = 0;
		regs->cr_iip += 16;
	} else
		ipsr->ri++;
}
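
/*
 * Step the guest instruction pointer back by one slot (the inverse of
 * vcpu_increment_iip()).
 */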
void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	if (ipsr->ri == 0) {
		ipsr->ri = 2;
		regs->cr_iip -= 16;
	} else
		ipsr->ri--;
}
/**
 * Emulate a privileged operation that caused a virtualization fault.
 *
 * @param vcpu	the virtual cpu
 * @param regs	the guest register frame
 *
 * The fault cause and the faulting opcode are read from
 * VMX(vcpu, cause) and VMX(vcpu, opcode).
 */
void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
{
	unsigned long status, cause, opcode;
	INST64 inst;

	status = IA64_NO_FAULT;
	cause = VMX(vcpu, cause);
	opcode = VMX(vcpu, opcode);
	inst.inst = opcode;
	/*
	 * Switch to actual virtual rid in rr0 and rr4,
	 * which is required by some tlb related instructions.
	 */
	prepare_if_physical_mode(vcpu);

	switch (cause) {
	case EVENT_RSM:
		kvm_rsm(vcpu, inst);
		break;
	case EVENT_SSM:
		kvm_ssm(vcpu, inst);
		break;
	case EVENT_MOV_TO_PSR:
		kvm_mov_to_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PSR:
		kvm_mov_from_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CR:
		kvm_mov_from_cr(vcpu, inst);
		break;
	case EVENT_MOV_TO_CR:
		kvm_mov_to_cr(vcpu, inst);
		break;
	case EVENT_BSW_0:
		vcpu_bsw0(vcpu);
		break;
	case EVENT_BSW_1:
		vcpu_bsw1(vcpu);
		break;
	case EVENT_COVER:
		vcpu_cover(vcpu);
		break;
	case EVENT_RFI:
		vcpu_rfi(vcpu);
		break;
	case EVENT_ITR_D:
		kvm_itr_d(vcpu, inst);
		break;
	case EVENT_ITR_I:
		kvm_itr_i(vcpu, inst);
		break;
	case EVENT_PTR_D:
		kvm_ptr_d(vcpu, inst);
		break;
	case EVENT_PTR_I:
		kvm_ptr_i(vcpu, inst);
		break;
	case EVENT_ITC_D:
		kvm_itc_d(vcpu, inst);
		break;
	case EVENT_ITC_I:
		kvm_itc_i(vcpu, inst);
		break;
	case EVENT_PTC_L:
		kvm_ptc_l(vcpu, inst);
		break;
	case EVENT_PTC_G:
		kvm_ptc_g(vcpu, inst);
		break;
	case EVENT_PTC_GA:
		kvm_ptc_ga(vcpu, inst);
		break;
	case EVENT_PTC_E:
		kvm_ptc_e(vcpu, inst);
		break;
	case EVENT_MOV_TO_RR:
		kvm_mov_to_rr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_RR:
		kvm_mov_from_rr(vcpu, inst);
		break;
	case EVENT_THASH:
		kvm_thash(vcpu, inst);
		break;
	case EVENT_TTAG:
		kvm_ttag(vcpu, inst);
		break;
	case EVENT_TPA:
		status = kvm_tpa(vcpu, inst);
		break;
	case EVENT_TAK:
		kvm_tak(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR_IMM:
		kvm_mov_to_ar_imm(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR:
		kvm_mov_to_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_FROM_AR:
		kvm_mov_from_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_TO_DBR:
		kvm_mov_to_dbr(vcpu, inst);
		break;
	case EVENT_MOV_TO_IBR:
		kvm_mov_to_ibr(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMC:
		kvm_mov_to_pmc(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMD:
		kvm_mov_to_pmd(vcpu, inst);
		break;
	case EVENT_MOV_TO_PKR:
		kvm_mov_to_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_DBR:
		kvm_mov_from_dbr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_IBR:
		kvm_mov_from_ibr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PMC:
		kvm_mov_from_pmc(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PKR:
		kvm_mov_from_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CPUID:
		kvm_mov_from_cpuid(vcpu, inst);
		break;
	case EVENT_VMSW:
		status = IA64_FAULT;
		break;
	default:
		break;
	}

	/* Unless a handler reported a fault, step past the emulated instruction. */
	if (status == IA64_NO_FAULT && cause != EVENT_RFI)
		vcpu_increment_iip(vcpu);

	recover_if_physical_mode(vcpu);
}
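
/*
 * Bring the virtual CPU to a reset-like state: start in metaphysical
 * (physical addressing) mode, give every virtual region register a
 * default value, and leave the local interrupt vector registers
 * (itv/pmv/cmcv/lrr0/lrr1) masked.
 */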
void init_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	vcpu->arch.mode_flags = GUEST_IN_PHY;
	VMX(vcpu, vrr[0]) = 0x38;
	VMX(vcpu, vrr[1]) = 0x38;
	VMX(vcpu, vrr[2]) = 0x38;
	VMX(vcpu, vrr[3]) = 0x38;
	VMX(vcpu, vrr[4]) = 0x38;
	VMX(vcpu, vrr[5]) = 0x38;
	VMX(vcpu, vrr[6]) = 0x38;
	VMX(vcpu, vrr[7]) = 0x38;
	VCPU(vcpu, vpsr) = IA64_PSR_BN;
	VCPU(vcpu, dcr) = 0;
	/* pta.size must not be 0.  The minimum is 15 (32k) */
	VCPU(vcpu, pta) = 15 << 2;
	VCPU(vcpu, itv) = 0x10000;
	VCPU(vcpu, itm) = 0;
	VMX(vcpu, last_itc) = 0;
	VCPU(vcpu, lid) = VCPU_LID(vcpu);
	VCPU(vcpu, ivr) = 0;
	VCPU(vcpu, tpr) = 0x10000;
	VCPU(vcpu, eoi) = 0;
	VCPU(vcpu, irr[0]) = 0;
	VCPU(vcpu, irr[1]) = 0;
	VCPU(vcpu, irr[2]) = 0;
	VCPU(vcpu, irr[3]) = 0;
	VCPU(vcpu, pmv) = 0x10000;
	VCPU(vcpu, cmcv) = 0x10000;
	VCPU(vcpu, lrr0) = 0x10000;	/* default reset value? */
	VCPU(vcpu, lrr1) = 0x10000;	/* default reset value? */
	update_vhpi(vcpu, NULL_VECTOR);
	VLSAPIC_XTP(vcpu) = 0x80;	/* disabled */

	for (i = 0; i < 4; i++)
		VLSAPIC_INSVC(vcpu, i) = 0;
}
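
/*
 * Program the machine region registers for this guest: rr0/rr4 get the
 * metaphysical values while the guest is in physical mode; all other
 * regions get the mapped (vrrtomrr) versions of the guest's virtual
 * region registers.
 */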
void kvm_init_all_rr(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	local_irq_save(psr);

	/* WARNING: virtual mode and physical mode must not
	 * coexist in the same region
	 */
	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));

	if (is_physical_mode(vcpu)) {
		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
			panic_vm(vcpu, "Machine Status conflicts!\n");

		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
		ia64_dv_serialize_data();
	} else {
		ia64_set_rr((VRN0 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr4);
		ia64_dv_serialize_data();
	}
	ia64_set_rr((VRN1 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN7 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN7])));
	ia64_dv_serialize_data();
	ia64_srlz_d();
	ia64_set_psr(psr);
}
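
/*
 * Entry point on the VMM side: restore the VPD state through
 * PAL_VPS_RESTORE, initialize the virtual TLB, VHPT and vcpu state,
 * program the region registers, then branch to the reset entry.
 */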
int vmm_entry(void)
{
	struct kvm_vcpu *v;

	v = current_vcpu;
	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
						0, 0, 0, 0, 0, 0);
	kvm_init_vtlb(v);
	kvm_init_vhpt(v);
	init_vcpu(v);
	kvm_init_all_rr(v);
	vmm_reset_entry();

	return 0;
}
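
/* Dump the saved guest register frame; used by panic_vm() below. */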
static void kvm_show_registers(struct kvm_pt_regs *regs)
{
	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
	struct kvm_vcpu *vcpu = current_vcpu;

	if (vcpu != NULL)
		printk("vcpu 0x%p vcpu %d\n",
		       vcpu, vcpu->vcpu_id);

	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
	       regs->cr_ipsr, regs->cr_ifs, ip);
	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
	printk("rnat: %016lx bspstore: %016lx pr  : %016lx\n",
	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
	printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0,
	       regs->b6, regs->b7);
	printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
	       regs->f6.u.bits[1], regs->f6.u.bits[0],
	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
	printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
	       regs->f8.u.bits[1], regs->f8.u.bits[0],
	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
	       regs->f10.u.bits[1], regs->f10.u.bits[0],
	       regs->f11.u.bits[1], regs->f11.u.bits[0]);

	printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1,
	       regs->r2, regs->r3);
	printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8,
	       regs->r9, regs->r10);
	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
	       regs->r12, regs->r13);
	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
	       regs->r15, regs->r16);
	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
	       regs->r18, regs->r19);
	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
	       regs->r21, regs->r22);
	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
	       regs->r24, regs->r25);
	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
	       regs->r27, regs->r28);
	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
	       regs->r30, regs->r31);
}
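
/*
 * Fatal error in the VMM: print the message and the guest register
 * frame, then exit to the host with EXIT_REASON_VM_PANIC.  Does not
 * return.
 */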
void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
{
	va_list args;
	char buf[256];

	struct kvm_pt_regs *regs = vcpu_regs(v);
	struct exit_ctl_data *p = &v->arch.exit_data;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk("%s", buf);
	kvm_show_registers(regs);
	p->exit_reason = EXIT_REASON_VM_PANIC;
	vmm_transition(v);
	/* Never returns */
	while (1);
}