vcpu.c
/*
 * kvm_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Shaofan Li (Susue Li) <susie.li@intel.com>
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Xiantao Zhang <xiantao.zhang@intel.com>
 */

#include <linux/kvm_host.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/ia64regs.h>
#include <asm/gcc_intrin.h>
#include <asm/kregs.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include "asm-offsets.h"
#include "vcpu.h"

/*
 * Special notes:
 * - Index by it/dt/rt sequence
 * - Only existing mode transitions are allowed in this table
 * - RSE is placed at lazy mode when emulating guest partial mode
 * - If gva happens to be rr0 and rr4, the only allowed case is identity
 *   mapping (gva = gpa), or panic! (How?)
 */
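/*
 * Indexing sketch: mm_switch_action(old, new) below evaluates
 * mm_switch_table[MODE_IND(old)][MODE_IND(new)], where MODE_IND() (from
 * vcpu.h) is expected to pack the three bits as (it << 2) | (dt << 1) | rt.
 * Under that assumption, pure physical mode (0,0,0) is row/column 0 and
 * fully virtual mode (1,1,1) is row/column 7, so the (0,0,0) -> (1,1,1)
 * transition described above is entry [0][7] == SW_P2V.
 */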
int mm_switch_table[8][8] = {
	/* 2004/09/12(Kevin): Allow switch to self */
	/*
	 * (it,dt,rt): (0,0,0) -> (1,1,1)
	 * This kind of transition usually occurs in the very early
	 * stage of Linux boot up procedure. Another case is in efi
	 * and pal calls. (see "arch/ia64/kernel/head.S")
	 *
	 * (it,dt,rt): (0,0,0) -> (0,1,1)
	 * This kind of transition is found when OSYa exits efi boot
	 * service. Due to gva = gpa in this case (Same region),
	 * data access can be satisfied though itlb entry for physical
	 * emulation is hit.
	 */
	{SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
	{0, 0, 0, 0, 0, 0, 0, 0},
	{0, 0, 0, 0, 0, 0, 0, 0},
	/*
	 * (it,dt,rt): (0,1,1) -> (1,1,1)
	 * This kind of transition is found in OSYa.
	 *
	 * (it,dt,rt): (0,1,1) -> (0,0,0)
	 * This kind of transition is found in OSYa
	 */
	{SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
	/* (1,0,0)->(1,1,1) */
	{0, 0, 0, 0, 0, 0, 0, SW_P2V},
	/*
	 * (it,dt,rt): (1,0,1) -> (1,1,1)
	 * This kind of transition usually occurs when Linux returns
	 * from the low level TLB miss handlers.
	 * (see "arch/ia64/kernel/ivt.S")
	 */
	{0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
	{0, 0, 0, 0, 0, 0, 0, 0},
	/*
	 * (it,dt,rt): (1,1,1) -> (1,0,1)
	 * This kind of transition usually occurs in Linux low level
	 * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
	 *
	 * (it,dt,rt): (1,1,1) -> (0,0,0)
	 * This kind of transition usually occurs in pal and efi calls,
	 * which requires running in physical mode.
	 * (see "arch/ia64/kernel/head.S")
	 * (1,1,1)->(1,0,0)
	 */
	{SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
};
void physical_mode_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mode_flags = GUEST_IN_PHY;
}

void switch_to_physical_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	/* Save original virtual mode rr[0] and rr[4] */
	psr = ia64_clear_ic();
	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_rr4);
	ia64_srlz_d();

	ia64_set_psr(psr);
	return;
}

void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	psr = ia64_clear_ic();
	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
	ia64_srlz_d();
	ia64_set_psr(psr);
	return;
}

static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
{
	return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}

void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
		    struct ia64_psr new_psr)
{
	int act;

	act = mm_switch_action(old_psr, new_psr);
	switch (act) {
	case SW_V2P:
		/*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
		  old_psr.val, new_psr.val);*/
		switch_to_physical_rid(vcpu);
		/*
		 * Set rse to enforced lazy, to prevent active rse
		 * save/restore when in guest physical mode.
		 */
		vcpu->arch.mode_flags |= GUEST_IN_PHY;
		break;
	case SW_P2V:
		switch_to_virtual_rid(vcpu);
		/*
		 * Recover the old mode, which was saved when entering
		 * guest physical mode.
		 */
		vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
		break;
	case SW_SELF:
		break;
	case SW_NOP:
		break;
	default:
		/* Sanity check */
		break;
	}
	return;
}
void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
			  struct ia64_psr new_psr)
{
	if ((old_psr.dt != new_psr.dt)
			|| (old_psr.it != new_psr.it)
			|| (old_psr.rt != new_psr.rt))
		switch_mm_mode(vcpu, old_psr, new_psr);

	return;
}

/*
 * In physical mode, inserting a tc/tr entry for region 0 or 4 uses
 * RID[0]/RID[4], which are reserved for physical mode emulation.
 * However, what those inserted tc/tr entries want is the RID for
 * virtual mode, so the original virtual RID needs to be restored
 * before the insert.
 *
 * Operations which require such a switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 *  - tpa
 *  - tak
 *  - thash?, ttag?
 * All of the above need the actual virtual RID for the destination entry.
 */
void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu)) {
		vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
		switch_to_virtual_rid(vcpu);
	}
	return;
}

/* Recover always follows prepare */
void recover_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu))
		switch_to_physical_rid(vcpu);
	vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
	return;
}
#define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)

static u16 gr_info[32] = {
	0, 	/* r0 is read-only : WE SHOULD NEVER GET THIS */
	RPT(r1), RPT(r2), RPT(r3),
	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
};
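/*
 * RPT(x) is simply offsetof(struct kvm_pt_regs, x) truncated to u16, so
 * gr_info[n] is the byte offset of the pt_regs slot that holds static
 * register rN. getreg()/setreg() below add that offset to the pt_regs base
 * pointer to reach the in-memory register image, e.g. gr_info[8] locates
 * regs->r8.
 */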
#define IA64_FIRST_STACKED_GR	32
#define IA64_FIRST_ROTATING_FR	32

static inline unsigned long
rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
{
	reg += rrb;
	if (reg >= sor)
		reg -= sor;
	return reg;
}

/*
 * Return the (rotated) index for floating point register REGNUM
 * (REGNUM must be in the range 32-127; the result is in the range 0-95).
 */
static inline unsigned long fph_index(struct kvm_pt_regs *regs,
				      long regnum)
{
	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
}
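/*
 * Example: with rrb.fr == 5 (cr.ifs bits 25-31 above), a guest access to
 * f34 gives fph_index() == rotate_reg(96, 5, 34 - 32) == 7, so
 * getfpreg()/setfpreg() below operate on physical f(32 + 7) == f39.
 * Note that rotate_reg() wraps reg + rrb back into [0, sor) with a single
 * subtraction, i.e. it relies on rrb being smaller than sor.
 */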
/*
 * Given a backing store address and a (signed) register count, return the
 * address that is num_regs register slots away, accounting for the NaT
 * collection slots interleaved in the RSE backing store.
 */
static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
					       long num_regs)
{
	long delta = ia64_rse_slot_num(addr) + num_regs;
	int i = 0;

	if (num_regs < 0)
		delta -= 0x3e;
	if (delta < 0) {
		while (delta <= -0x3f) {
			i--;
			delta += 0x3f;
		}
	} else {
		while (delta >= 0x3f) {
			i++;
			delta -= 0x3f;
		}
	}
	return addr + num_regs + i;
}
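/*
 * The 0x3f arithmetic reflects the RSE backing store layout: every 64th
 * slot (slot number 63) holds a NaT collection rather than a register, so
 * walking num_regs registers may also have to step over one collection
 * slot per 63 registers crossed; i accumulates those extra slots. The
 * -0x3e bias for negative num_regs makes the implicit divide-by-63 (done
 * here with the loops) round in the right direction when walking
 * backwards.
 */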
static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
			unsigned long *val, int *nat)
{
	unsigned long *bsp, *addr, *rnat_addr, *bspstore;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	new_rsc = old_rsc & (~(0x3));
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);

	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	bsp = kbs + (regs->loadrs >> 19);

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	if (addr >= bspstore) {
		ia64_flushrs();
		ia64_mf();
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	}
	*val = *addr;
	if (nat) {
		if (bspstore < rnat_addr)
			*nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
					& nat_mask);
		else
			*nat = (int)!!((*rnat_addr) & nat_mask);
		ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
	}
}
void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
		 unsigned long val, unsigned long nat)
{
	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc, psr;
	unsigned long rnat;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	/* put RSC to lazy mode, and set loadrs 0 */
	new_rsc = old_rsc & (~0x3fff0003);
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
	bsp = kbs + (regs->loadrs >> 19);	/* 16 + 3 */

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	local_irq_save(psr);
	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	if (addr >= bspstore) {
		ia64_flushrs();
		ia64_mf();
		*addr = val;
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
		else
			*rnat_addr = (*rnat_addr) & (~nat_mask);
		ia64_mf();
		ia64_loadrs();
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	} else {
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		*addr = val;
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
		else
			*rnat_addr = (*rnat_addr) & (~nat_mask);
		ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	}
	local_irq_restore(psr);
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}
void getreg(unsigned long regnum, unsigned long *val,
	    int *nat, struct kvm_pt_regs *regs)
{
	unsigned long addr, *unat;

	if (regnum >= IA64_FIRST_STACKED_GR) {
		get_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;
	addr += gr_info[regnum];
	*val = *(unsigned long *)addr;
	/*
	 * do it only when requested
	 */
	if (nat)
		*nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
}

void setreg(unsigned long regnum, unsigned long val,
	    int nat, struct kvm_pt_regs *regs)
{
	unsigned long addr;
	unsigned long bitmask;
	unsigned long *unat;

	/*
	 * First take care of stacked registers
	 */
	if (regnum >= IA64_FIRST_STACKED_GR) {
		set_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;
	/*
	 * add offset from base of struct
	 * and do it !
	 */
	addr += gr_info[regnum];
	*(unsigned long *)addr = val;

	/*
	 * We need to clear the corresponding UNAT bit to fully emulate the load
	 * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
	 */
	bitmask = 1UL << ((addr >> 3) & 0x3f);
	if (nat)
		*unat |= bitmask;
	else
		*unat &= ~bitmask;
}
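/*
 * The UNAT bit index above is derived from the spill address exactly as
 * the hardware does for st8.spill/ld8.fill: bits 8:3 of the address select
 * the bit, hence (addr >> 3) & 0x3f. For example, if regs->r8 happens to
 * live at an address ending in 0x140, its NaT state is tracked in bit
 * (0x140 >> 3) & 0x3f == 0x28 of regs->eml_unat.
 */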
u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	u64 val;

	if (!reg)
		return 0;
	getreg(reg, &val, 0, regs);
	return val;
}

void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;

	if (!reg)
		return;
	if (reg >= sof + 32)
		return;
	setreg(reg, value, nat, regs);	/* FIXME: handle NATs later*/
}
void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
	      struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration */
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case (reg) :				\
		ia64_stf_spill(fpval, reg);	\
		break

	switch (regnum) {
		CASE_FIXED_FP(0);
		CASE_FIXED_FP(1);
		CASE_FIXED_FP(2);
		CASE_FIXED_FP(3);
		CASE_FIXED_FP(4);
		CASE_FIXED_FP(5);
		CASE_FIXED_FP(6);
		CASE_FIXED_FP(7);
		CASE_FIXED_FP(8);
		CASE_FIXED_FP(9);
		CASE_FIXED_FP(10);
		CASE_FIXED_FP(11);
		CASE_FIXED_FP(12);
		CASE_FIXED_FP(13);
		CASE_FIXED_FP(14);
		CASE_FIXED_FP(15);
		CASE_FIXED_FP(16);
		CASE_FIXED_FP(17);
		CASE_FIXED_FP(18);
		CASE_FIXED_FP(19);
		CASE_FIXED_FP(20);
		CASE_FIXED_FP(21);
		CASE_FIXED_FP(22);
		CASE_FIXED_FP(23);
		CASE_FIXED_FP(24);
		CASE_FIXED_FP(25);
		CASE_FIXED_FP(26);
		CASE_FIXED_FP(27);
		CASE_FIXED_FP(28);
		CASE_FIXED_FP(29);
		CASE_FIXED_FP(30);
		CASE_FIXED_FP(31);
		CASE_FIXED_FP(32);
		CASE_FIXED_FP(33);
		CASE_FIXED_FP(34);
		CASE_FIXED_FP(35);
		CASE_FIXED_FP(36);
		CASE_FIXED_FP(37);
		CASE_FIXED_FP(38);
		CASE_FIXED_FP(39);
		CASE_FIXED_FP(40);
		CASE_FIXED_FP(41);
		CASE_FIXED_FP(42);
		CASE_FIXED_FP(43);
		CASE_FIXED_FP(44);
		CASE_FIXED_FP(45);
		CASE_FIXED_FP(46);
		CASE_FIXED_FP(47);
		CASE_FIXED_FP(48);
		CASE_FIXED_FP(49);
		CASE_FIXED_FP(50);
		CASE_FIXED_FP(51);
		CASE_FIXED_FP(52);
		CASE_FIXED_FP(53);
		CASE_FIXED_FP(54);
		CASE_FIXED_FP(55);
		CASE_FIXED_FP(56);
		CASE_FIXED_FP(57);
		CASE_FIXED_FP(58);
		CASE_FIXED_FP(59);
		CASE_FIXED_FP(60);
		CASE_FIXED_FP(61);
		CASE_FIXED_FP(62);
		CASE_FIXED_FP(63);
		CASE_FIXED_FP(64);
		CASE_FIXED_FP(65);
		CASE_FIXED_FP(66);
		CASE_FIXED_FP(67);
		CASE_FIXED_FP(68);
		CASE_FIXED_FP(69);
		CASE_FIXED_FP(70);
		CASE_FIXED_FP(71);
		CASE_FIXED_FP(72);
		CASE_FIXED_FP(73);
		CASE_FIXED_FP(74);
		CASE_FIXED_FP(75);
		CASE_FIXED_FP(76);
		CASE_FIXED_FP(77);
		CASE_FIXED_FP(78);
		CASE_FIXED_FP(79);
		CASE_FIXED_FP(80);
		CASE_FIXED_FP(81);
		CASE_FIXED_FP(82);
		CASE_FIXED_FP(83);
		CASE_FIXED_FP(84);
		CASE_FIXED_FP(85);
		CASE_FIXED_FP(86);
		CASE_FIXED_FP(87);
		CASE_FIXED_FP(88);
		CASE_FIXED_FP(89);
		CASE_FIXED_FP(90);
		CASE_FIXED_FP(91);
		CASE_FIXED_FP(92);
		CASE_FIXED_FP(93);
		CASE_FIXED_FP(94);
		CASE_FIXED_FP(95);
		CASE_FIXED_FP(96);
		CASE_FIXED_FP(97);
		CASE_FIXED_FP(98);
		CASE_FIXED_FP(99);
		CASE_FIXED_FP(100);
		CASE_FIXED_FP(101);
		CASE_FIXED_FP(102);
		CASE_FIXED_FP(103);
		CASE_FIXED_FP(104);
		CASE_FIXED_FP(105);
		CASE_FIXED_FP(106);
		CASE_FIXED_FP(107);
		CASE_FIXED_FP(108);
		CASE_FIXED_FP(109);
		CASE_FIXED_FP(110);
		CASE_FIXED_FP(111);
		CASE_FIXED_FP(112);
		CASE_FIXED_FP(113);
		CASE_FIXED_FP(114);
		CASE_FIXED_FP(115);
		CASE_FIXED_FP(116);
		CASE_FIXED_FP(117);
		CASE_FIXED_FP(118);
		CASE_FIXED_FP(119);
		CASE_FIXED_FP(120);
		CASE_FIXED_FP(121);
		CASE_FIXED_FP(122);
		CASE_FIXED_FP(123);
		CASE_FIXED_FP(124);
		CASE_FIXED_FP(125);
		CASE_FIXED_FP(126);
		CASE_FIXED_FP(127);
	}
#undef CASE_FIXED_FP
}
void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
	      struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration */
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case (reg) :				\
		ia64_ldf_fill(reg, fpval);	\
		break

	switch (regnum) {
		CASE_FIXED_FP(2);
		CASE_FIXED_FP(3);
		CASE_FIXED_FP(4);
		CASE_FIXED_FP(5);
		CASE_FIXED_FP(6);
		CASE_FIXED_FP(7);
		CASE_FIXED_FP(8);
		CASE_FIXED_FP(9);
		CASE_FIXED_FP(10);
		CASE_FIXED_FP(11);
		CASE_FIXED_FP(12);
		CASE_FIXED_FP(13);
		CASE_FIXED_FP(14);
		CASE_FIXED_FP(15);
		CASE_FIXED_FP(16);
		CASE_FIXED_FP(17);
		CASE_FIXED_FP(18);
		CASE_FIXED_FP(19);
		CASE_FIXED_FP(20);
		CASE_FIXED_FP(21);
		CASE_FIXED_FP(22);
		CASE_FIXED_FP(23);
		CASE_FIXED_FP(24);
		CASE_FIXED_FP(25);
		CASE_FIXED_FP(26);
		CASE_FIXED_FP(27);
		CASE_FIXED_FP(28);
		CASE_FIXED_FP(29);
		CASE_FIXED_FP(30);
		CASE_FIXED_FP(31);
		CASE_FIXED_FP(32);
		CASE_FIXED_FP(33);
		CASE_FIXED_FP(34);
		CASE_FIXED_FP(35);
		CASE_FIXED_FP(36);
		CASE_FIXED_FP(37);
		CASE_FIXED_FP(38);
		CASE_FIXED_FP(39);
		CASE_FIXED_FP(40);
		CASE_FIXED_FP(41);
		CASE_FIXED_FP(42);
		CASE_FIXED_FP(43);
		CASE_FIXED_FP(44);
		CASE_FIXED_FP(45);
		CASE_FIXED_FP(46);
		CASE_FIXED_FP(47);
		CASE_FIXED_FP(48);
		CASE_FIXED_FP(49);
		CASE_FIXED_FP(50);
		CASE_FIXED_FP(51);
		CASE_FIXED_FP(52);
		CASE_FIXED_FP(53);
		CASE_FIXED_FP(54);
		CASE_FIXED_FP(55);
		CASE_FIXED_FP(56);
		CASE_FIXED_FP(57);
		CASE_FIXED_FP(58);
		CASE_FIXED_FP(59);
		CASE_FIXED_FP(60);
		CASE_FIXED_FP(61);
		CASE_FIXED_FP(62);
		CASE_FIXED_FP(63);
		CASE_FIXED_FP(64);
		CASE_FIXED_FP(65);
		CASE_FIXED_FP(66);
		CASE_FIXED_FP(67);
		CASE_FIXED_FP(68);
		CASE_FIXED_FP(69);
		CASE_FIXED_FP(70);
		CASE_FIXED_FP(71);
		CASE_FIXED_FP(72);
		CASE_FIXED_FP(73);
		CASE_FIXED_FP(74);
		CASE_FIXED_FP(75);
		CASE_FIXED_FP(76);
		CASE_FIXED_FP(77);
		CASE_FIXED_FP(78);
		CASE_FIXED_FP(79);
		CASE_FIXED_FP(80);
		CASE_FIXED_FP(81);
		CASE_FIXED_FP(82);
		CASE_FIXED_FP(83);
		CASE_FIXED_FP(84);
		CASE_FIXED_FP(85);
		CASE_FIXED_FP(86);
		CASE_FIXED_FP(87);
		CASE_FIXED_FP(88);
		CASE_FIXED_FP(89);
		CASE_FIXED_FP(90);
		CASE_FIXED_FP(91);
		CASE_FIXED_FP(92);
		CASE_FIXED_FP(93);
		CASE_FIXED_FP(94);
		CASE_FIXED_FP(95);
		CASE_FIXED_FP(96);
		CASE_FIXED_FP(97);
		CASE_FIXED_FP(98);
		CASE_FIXED_FP(99);
		CASE_FIXED_FP(100);
		CASE_FIXED_FP(101);
		CASE_FIXED_FP(102);
		CASE_FIXED_FP(103);
		CASE_FIXED_FP(104);
		CASE_FIXED_FP(105);
		CASE_FIXED_FP(106);
		CASE_FIXED_FP(107);
		CASE_FIXED_FP(108);
		CASE_FIXED_FP(109);
		CASE_FIXED_FP(110);
		CASE_FIXED_FP(111);
		CASE_FIXED_FP(112);
		CASE_FIXED_FP(113);
		CASE_FIXED_FP(114);
		CASE_FIXED_FP(115);
		CASE_FIXED_FP(116);
		CASE_FIXED_FP(117);
		CASE_FIXED_FP(118);
		CASE_FIXED_FP(119);
		CASE_FIXED_FP(120);
		CASE_FIXED_FP(121);
		CASE_FIXED_FP(122);
		CASE_FIXED_FP(123);
		CASE_FIXED_FP(124);
		CASE_FIXED_FP(125);
		CASE_FIXED_FP(126);
		CASE_FIXED_FP(127);
	}
#undef CASE_FIXED_FP
}
void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
		    struct ia64_fpreg *val)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	getfpreg(reg, val, regs);	/* FIXME: handle NATs later*/
}

void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
		    struct ia64_fpreg *val)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	if (reg > 1)
		setfpreg(reg, val, regs);	/* FIXME: handle NATs later*/
}

/************************************************************************
 * lsapic timer
 ***********************************************************************/
u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
{
	unsigned long guest_itc;

	guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);

	if (guest_itc >= VMX(vcpu, last_itc)) {
		VMX(vcpu, last_itc) = guest_itc;
		return guest_itc;
	} else
		return VMX(vcpu, last_itc);
}
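/*
 * The guest ITC is modelled as host ar.itc plus a per-vcpu itc_offset,
 * with last_itc caching the largest value ever returned so the guest
 * never observes its cycle counter going backwards (e.g. after the vcpu
 * moves to a host cpu whose ar.itc lags the previous one).
 */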
static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);

static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_vcpu *v;
	struct kvm *kvm;
	int i;
	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
	unsigned long vitv = VCPU(vcpu, itv);

	kvm = (struct kvm *)KVM_VM_BASE;

	if (vcpu->vcpu_id == 0) {
		for (i = 0; i < kvm->arch.online_vcpus; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			VMX(v, itc_offset) = itc_offset;
			VMX(v, last_itc) = 0;
		}
	}
	VMX(vcpu, last_itc) = 0;
	if (VCPU(vcpu, itm) <= val) {
		VMX(vcpu, itc_check) = 0;
		vcpu_unpend_interrupt(vcpu, vitv);
	} else {
		VMX(vcpu, itc_check) = 1;
		vcpu_set_itm(vcpu, VCPU(vcpu, itm));
	}
}
static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, itm));
}

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
{
	unsigned long vitv = VCPU(vcpu, itv);
	VCPU(vcpu, itm) = val;

	if (val > vcpu_get_itc(vcpu)) {
		VMX(vcpu, itc_check) = 1;
		vcpu_unpend_interrupt(vcpu, vitv);
		VMX(vcpu, timer_pending) = 0;
	} else
		VMX(vcpu, itc_check) = 0;
}

#define ITV_VECTOR(itv)		(itv & 0xff)
#define ITV_IRQ_MASK(itv)	(itv & (1 << 16))

static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, itv) = val;
	if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
		vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
		vcpu->arch.timer_pending = 0;
	}
}

static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
{
	int vec;

	vec = highest_inservice_irq(vcpu);
	if (vec == NULL_VECTOR)
		return;
	VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
	VCPU(vcpu, eoi) = 0;
	vcpu->arch.irq_new_pending = 1;
}

/* See Table 5-8 in SDM vol2 for the definition */
int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
{
	union ia64_tpr vtpr;

	vtpr.val = VCPU(vcpu, tpr);

	if (h_inservice == NMI_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == NMI_VECTOR) {
		/* Non Maskable Interrupt */
		return IRQ_NO_MASKED;
	}

	if (h_inservice == ExtINT_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == ExtINT_VECTOR) {
		if (vtpr.mmi) {
			/* mask all external IRQ */
			return IRQ_MASKED_BY_VTPR;
		} else
			return IRQ_NO_MASKED;
	}

	if (is_higher_irq(h_pending, h_inservice)) {
		if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
			return IRQ_NO_MASKED;
		else
			return IRQ_MASKED_BY_VTPR;
	} else {
		return IRQ_MASKED_BY_INSVC;
	}
}
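/*
 * Rough reading of the TPR check above: ordinary external vectors fall
 * into 16-vector priority classes, and is_higher_class() is presumably
 * comparing the pending vector's class against tpr.mic (with tpr.mmi
 * folded in so that it masks every non-NMI source). Under that reading,
 * with tpr.mic == 5 and no higher-priority interrupt in service, a
 * pending vector 0x61 (class 6) is deliverable while 0x4f (class 4)
 * comes back as IRQ_MASKED_BY_VTPR.
 */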
void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long spsr;
	int ret;

	local_irq_save(spsr);
	ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);

	vcpu->arch.irq_new_pending = 1;
}

void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long spsr;
	int ret;

	local_irq_save(spsr);
	ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);
	if (ret) {
		vcpu->arch.irq_new_pending = 1;
		wmb();
	}
}

void update_vhpi(struct kvm_vcpu *vcpu, int vec)
{
	u64 vhpi;

	if (vec == NULL_VECTOR)
		vhpi = 0;
	else if (vec == NMI_VECTOR)
		vhpi = 32;
	else if (vec == ExtINT_VECTOR)
		vhpi = 16;
	else
		vhpi = vec >> 4;

	VCPU(vcpu, vhpi) = vhpi;
	if (VCPU(vcpu, vac).a_int)
		ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
				(u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
}

u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
{
	int vec, h_inservice, mask;

	vec = highest_pending_irq(vcpu);
	h_inservice = highest_inservice_irq(vcpu);
	mask = irq_masked(vcpu, vec, h_inservice);
	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
		if (VCPU(vcpu, vhpi))
			update_vhpi(vcpu, NULL_VECTOR);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	if (mask == IRQ_MASKED_BY_VTPR) {
		update_vhpi(vcpu, vec);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
	vcpu_unpend_interrupt(vcpu, vec);
	return (u64)vec;
}
/**************************************************************************
  Privileged operation emulation routines
 **************************************************************************/
u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_pta vpta;
	union ia64_rr vrr;
	u64 pval;
	u64 vhpt_offset;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
	if (vpta.vf) {
		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
				vpta.val, 0, 0, 0, 0);
	} else {
		pval = (vadr & VRN_MASK) | vhpt_offset |
			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
	}
	return pval;
}
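/*
 * The else-branch is the short-format hash: the result takes its region
 * bits from vadr (vadr & VRN_MASK), its low bits from the VHPT offset
 * computed above ((vadr >> rr.ps) << 3, truncated to pta.size bits), and
 * the rest from pta.base. The shift sequence
 * (vpta.val << 3 >> (vpta.size + 3) << vpta.size) strips both the low
 * pta.size bits and the top region bits of the PTA value, i.e. it aligns
 * pta.base to the 2^pta.size table boundary before the OR.
 */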
u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_rr vrr;
	union ia64_pta vpta;
	u64 pval;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	if (vpta.vf) {
		pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
						0, 0, 0, 0, 0);
	} else
		pval = 1;

	return pval;
}

u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
{
	struct thash_data *data;
	union ia64_pta vpta;
	u64 key;

	vpta.val = vcpu_get_pta(vcpu);
	if (vpta.vf == 0) {
		key = 1;
		return key;
	}
	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (!data || !data->p)
		key = 1;
	else
		key = data->key;

	return key;
}

void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long thash, vadr;

	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	thash = vcpu_thash(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
}

void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tag, vadr;

	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	tag = vcpu_ttag(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
}

int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
{
	struct thash_data *data;
	union ia64_isr visr, pt_isr;
	struct kvm_pt_regs *regs;
	struct ia64_psr vpsr;

	regs = vcpu_regs(vcpu);
	pt_isr.val = VMX(vcpu, cr_isr);
	visr.val = 0;
	visr.ei = pt_isr.ei;
	visr.ir = pt_isr.ir;
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
	visr.na = 1;

	data = vhpt_lookup(vadr);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			*padr = (data->gpaddr >> data->ps << data->ps) |
				(vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}

	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
				| (vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}
	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			alt_dtlb(vcpu, vadr);
			return IA64_FAULT;
		} else {
			nested_dtlb(vcpu);
			return IA64_FAULT;
		}
	} else {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			dvhpt_fault(vcpu, vadr);
			return IA64_FAULT;
		} else {
			nested_dtlb(vcpu);
			return IA64_FAULT;
		}
	}

	return IA64_NO_FAULT;
}

int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1, r3;

	r3 = vcpu_get_gr(vcpu, inst.M46.r3);

	if (vcpu_tpa(vcpu, r3, &r1))
		return IA64_FAULT;

	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
	return(IA64_NO_FAULT);
}

void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1, r3;

	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
	r1 = vcpu_tak(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
}

/************************************
 * Insert/Purge translation register/cache
 ************************************/
void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
}

void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
}

void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 ps, va, rid;
	struct thash_data *p_itr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;
	rid = vcpu_get_rr(vcpu, ifa);
	rid = rid & RR_RID_MASK;
	p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
	vcpu_set_tr(p_itr, pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
}

void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 gpfn;
	u64 ps, va, rid;
	struct thash_data *p_dtr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;

	if (ps != _PAGE_SIZE_16M)
		thash_purge_entries(vcpu, va, ps);
	gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	if (__gpfn_is_io(gpfn))
		pte |= VTLB_PTE_IO;
	rid = vcpu_get_rr(vcpu, va);
	rid = rid & RR_RID_MASK;
	p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
	vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
		    pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
}

void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	int index;
	u64 va;

	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
		vcpu->arch.dtrs[index].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	int index;
	u64 va;

	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
		vcpu->arch.itrs[index].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	va = PAGEALIGN(va, ps);
	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
{
	thash_purge_all(vcpu);
}

void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	long psr;

	local_irq_save(psr);
	p->exit_reason = EXIT_REASON_PTC_G;

	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
	p->u.ptc_g_data.vaddr = va;
	p->u.ptc_g_data.ps = ps;
	vmm_transition(vcpu);
	/* Do Local Purge Here*/
	vcpu_ptc_l(vcpu, va, ps);
	local_irq_restore(psr);
}

void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	vcpu_ptc_ga(vcpu, va, ps);
}

void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	vcpu_ptc_e(vcpu, ifa);
}

void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
}

void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte, slot;

	slot = vcpu_get_gr(vcpu, inst.M45.r3);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	vcpu_itr_d(vcpu, slot, pte, itir, ifa);
}

void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte, slot;

	slot = vcpu_get_gr(vcpu, inst.M45.r3);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	vcpu_itr_i(vcpu, slot, pte, itir, ifa);
}

void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte;

	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_itc_d(vcpu, pte, itir, ifa);
}

void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte;

	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_itc_i(vcpu, pte, itir, ifa);
}
/*************************************
 * Moves to semi-privileged registers
 *************************************/
void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long imm;

	if (inst.M30.s)
		imm = -inst.M30.imm;
	else
		imm = inst.M30.imm;

	vcpu_set_itc(vcpu, imm);
}

void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M29.r2);
	vcpu_set_itc(vcpu, r2);
}

void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1;

	r1 = vcpu_get_itc(vcpu);
	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
}

/**************************************************************************
  struct kvm_vcpu protection key register access routines
 **************************************************************************/
unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	return ((unsigned long)ia64_get_pkr(reg));
}

void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
{
	ia64_set_pkr(reg, val);
}

/********************************
 * Moves to privileged registers
 ********************************/
unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
			  unsigned long val)
{
	union ia64_rr oldrr, newrr;
	unsigned long rrval;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	oldrr.val = vcpu_get_rr(vcpu, reg);
	newrr.val = val;
	vcpu->arch.vrr[reg >> VRN_SHIFT] = val;

	switch ((unsigned long)(reg >> VRN_SHIFT)) {
	case VRN6:
		vcpu->arch.vmm_rr = vrrtomrr(val);
		local_irq_save(psr);
		p->exit_reason = EXIT_REASON_SWITCH_RR6;
		vmm_transition(vcpu);
		local_irq_restore(psr);
		break;
	case VRN4:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr4 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	case VRN0:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr0 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	default:
		ia64_set_rr(reg, vrrtomrr(val));
		break;
	}

	return (IA64_NO_FAULT);
}
void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_rr(vcpu, r3, r2);
}

void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmc(vcpu, r3, r2);
}

void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmd(vcpu, r3, r2);
}

void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	u64 r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pkr(vcpu, r3, r2);
}

void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_rr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pkr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_dbr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_ibr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pmc(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
{
	/* FIXME: This could get called as a result of a rsvd-reg fault */
	if (reg > (ia64_get_cpuid(3) & 0xff))
		return 0;
	else
		return ia64_get_cpuid(reg);
}

void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_cpuid(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
{
	VCPU(vcpu, tpr) = val;
	vcpu->arch.irq_check = 1;
}
unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M32.r2);
	VCPU(vcpu, vcr[inst.M32.cr3]) = r2;

	switch (inst.M32.cr3) {
	case 0:		/* cr.dcr */
		vcpu_set_dcr(vcpu, r2);
		break;
	case 1:		/* cr.itm */
		vcpu_set_itm(vcpu, r2);
		break;
	case 66:	/* cr.tpr */
		vcpu_set_tpr(vcpu, r2);
		break;
	case 67:	/* cr.eoi */
		vcpu_set_eoi(vcpu, r2);
		break;
	default:
		break;
	}

	return 0;
}

unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tgt = inst.M33.r1;
	unsigned long val;

	switch (inst.M33.cr3) {
	case 65:	/* cr.ivr: reading it acknowledges the interrupt */
		val = vcpu_get_ivr(vcpu);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	case 67:	/* cr.eoi always reads back as 0 */
		vcpu_set_gr(vcpu, tgt, 0L, 0);
		break;
	default:
		val = VCPU(vcpu, vcr[inst.M33.cr3]);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	}
	return 0;
}
void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long mask;
	struct kvm_pt_regs *regs;
	struct ia64_psr old_psr, new_psr;

	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	regs = vcpu_regs(vcpu);
	/*
	 * We only support a guest with:
	 *   vpsr.pk = 0
	 *   vpsr.is = 0
	 * Otherwise panic.
	 */
	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
		panic_vm(vcpu, "Only support guests with vpsr.pk =0 \
				& vpsr.is=0\n");

	/*
	 * The IA64_PSR bits id/da/dd/ss/ed/ia become 0 after successful
	 * execution of each instruction, so keep them cleared in the
	 * virtual PSR and let the machine PSR (mIA64_PSR) carry them.
	 */
	VCPU(vcpu, vpsr) = val
		& (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
			IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

	if (!old_psr.i && (val & IA64_PSR_I)) {
		/* vpsr.i 0->1 */
		vcpu->arch.irq_check = 1;
	}
	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	/*
	 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
	 * except for the following bits:
	 *   ic/i/dt/si/rt/mc/it/bn/vm
	 */
	mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
		IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
		IA64_PSR_VM;

	regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));

	check_mm_mode_switch(vcpu, old_psr, new_psr);

	return;
}
unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
{
	struct ia64_psr vpsr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	if (!vpsr.ic)
		VCPU(vcpu, ifs) = regs->cr_ifs;
	regs->cr_ifs = IA64_IFS_V;
	return (IA64_NO_FAULT);
}

/**************************************************************************
  VCPU banked general register access routines
 **************************************************************************/
#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	do {								\
		__asm__ __volatile__ (					\
				";;extr.u %0 = %3,%6,16;;\n"		\
				"dep %1 = %0, %1, 0, 16;;\n"		\
				"st8 [%4] = %1\n"			\
				"extr.u %0 = %2, 16, 16;;\n"		\
				"dep %3 = %0, %3, %6, 16;;\n"		\
				"st8 [%5] = %3\n"			\
				::"r"(i), "r"(*b1unat), "r"(*b0unat),	\
				"r"(*runat), "r"(b1unat), "r"(runat),	\
				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
	} while (0)
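/*
 * What the asm above does, in rough terms: after the C loop in
 * vcpu_bsw0() has copied the bank-1 values of r16-r31 into their save
 * area and loaded the bank-0 values into pt_regs, the matching NaT bits
 * have to move the same way. The macro extracts the 16 UNAT bits that
 * cover the r16-r31 slots of pt_regs (at bit position
 * VMM_PT_REGS_R16_SLOT), stores them into the bank-1 NaT word, and
 * deposits the saved bank-0 NaT bits back into regs->eml_unat.
 * vcpu_bsw1_unat() below is the mirror image for the opposite switch.
 */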
  1462. void vcpu_bsw0(struct kvm_vcpu *vcpu)
  1463. {
  1464. unsigned long i;
  1465. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  1466. unsigned long *r = &regs->r16;
  1467. unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
  1468. unsigned long *b1 = &VCPU(vcpu, vgr[0]);
  1469. unsigned long *runat = &regs->eml_unat;
  1470. unsigned long *b0unat = &VCPU(vcpu, vbnat);
  1471. unsigned long *b1unat = &VCPU(vcpu, vnat);
  1472. if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
  1473. for (i = 0; i < 16; i++) {
  1474. *b1++ = *r;
  1475. *r++ = *b0++;
  1476. }
  1477. vcpu_bsw0_unat(i, b0unat, b1unat, runat,
  1478. VMM_PT_REGS_R16_SLOT);
  1479. VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
  1480. }
  1481. }
  1482. #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
  1483. do { \
  1484. __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
  1485. "dep %1 = %0, %1, 16, 16;;\n" \
  1486. "st8 [%4] = %1\n" \
  1487. "extr.u %0 = %2, 0, 16;;\n" \
  1488. "dep %3 = %0, %3, %6, 16;;\n" \
  1489. "st8 [%5] = %3\n" \
  1490. ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
  1491. "r"(*runat), "r"(b0unat), "r"(runat), \
  1492. "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
  1493. } while (0)
void vcpu_bsw1(struct kvm_vcpu *vcpu)
{
        unsigned long i;

        struct kvm_pt_regs *regs = vcpu_regs(vcpu);
        unsigned long *r = &regs->r16;
        unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
        unsigned long *b1 = &VCPU(vcpu, vgr[0]);
        unsigned long *runat = &regs->eml_unat;
        unsigned long *b0unat = &VCPU(vcpu, vbnat);
        unsigned long *b1unat = &VCPU(vcpu, vnat);

        if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
                for (i = 0; i < 16; i++) {
                        *b0++ = *r;
                        *r++ = *b1++;
                }
                vcpu_bsw1_unat(i, b0unat, b1unat, runat,
                                VMM_PT_REGS_R16_SLOT);
                VCPU(vcpu, vpsr) |= IA64_PSR_BN;
        }
}
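/*
 * Roughly what vcpu_rfi() below does (a reading of the code, not an original
 * comment): reselect the register bank recorded in the guest ipsr.bn,
 * install the guest ipsr as the new PSR, restore cr.ifs only when its valid
 * bit (bit 63) is set, and finally resume at the guest iip.
 */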
void vcpu_rfi(struct kvm_vcpu *vcpu)
{
        unsigned long ifs, psr;
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);

        psr = VCPU(vcpu, ipsr);
        if (psr & IA64_PSR_BN)
                vcpu_bsw1(vcpu);
        else
                vcpu_bsw0(vcpu);
        vcpu_set_psr(vcpu, psr);
        ifs = VCPU(vcpu, ifs);
        if (ifs >> 63)
                regs->cr_ifs = ifs;
        regs->cr_iip = VCPU(vcpu, iip);
}
/*
 * The VPSR cannot track the guest PSR bits listed in 'mask' below
 * (be/up/ac/mfl/mfh/cpl/ri); this function reassembles the full guest PSR
 * by taking those bits from the machine cr.ipsr.
 */
unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
{
        unsigned long mask;
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);

        mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
                IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
        return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}
void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long vpsr;
        unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
                                | inst.M44.imm;

        vpsr = vcpu_get_psr(vcpu);
        vpsr &= (~imm24);
        vcpu_set_psr(vcpu, vpsr);
}
void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long vpsr;
        unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
                                | inst.M44.imm;

        vpsr = vcpu_get_psr(vcpu);
        vpsr |= imm24;
        vcpu_set_psr(vcpu, vpsr);
}
/* Generate Mask
 * Parameter:
 *   bit -- starting bit
 *   len -- how many bits
 */
#define MASK(bit, len) \
({ \
        __u64 ret; \
 \
        __asm __volatile("dep %0=-1, r0, %1, %2" \
                        : "=r" (ret) : \
                          "M" (bit), \
                          "M" (len)); \
        ret; \
})
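/*
 * For reference (values worked out from the dep encoding above, not stated
 * in the original comments): MASK(bit, len) yields 'len' one-bits starting
 * at position 'bit', e.g.
 *
 *        MASK(0, 32)  == 0x00000000ffffffffUL
 *        MASK(32, 32) == 0xffffffff00000000UL
 *
 * vcpu_set_psr_l() below uses this pair to splice the guest-supplied lower
 * 32 PSR bits onto the current upper 32 bits.
 */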
void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
{
        val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
        vcpu_set_psr(vcpu, val);
}
void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long val;

        val = vcpu_get_gr(vcpu, inst.M35.r2);
        vcpu_set_psr_l(vcpu, val);
}
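/*
 * kvm_mov_from_psr() below hands the guest only PSR{31:0} together with
 * PSR{36:35}, as the two MASK() terms show; the intent is presumably to
 * match what a real mov-from-PSR exposes, the remaining system bits reading
 * as zero.  (This is a reading of the mask arithmetic, not an original
 * comment.)
 */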
void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long val;

        val = vcpu_get_psr(vcpu);
        val = (val & MASK(0, 32)) | (val & MASK(35, 2));
        vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
}
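/*
 * Background for the two helpers below (a general IA-64 fact, not spelled
 * out in this file): instructions are packed three to a 16-byte bundle;
 * cr.iip points at the bundle and psr.ri (0..2) selects the slot.  Stepping
 * past slot 2 therefore advances iip by 16 and resets ri, e.g.
 *
 *        iip = 0x1000, ri = 2  --increment-->  iip = 0x1010, ri = 0
 */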
void vcpu_increment_iip(struct kvm_vcpu *vcpu)
{
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);
        struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

        if (ipsr->ri == 2) {
                ipsr->ri = 0;
                regs->cr_iip += 16;
        } else
                ipsr->ri++;
}

void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
{
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);
        struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

        if (ipsr->ri == 0) {
                ipsr->ri = 2;
                regs->cr_iip -= 16;
        } else
                ipsr->ri--;
}
/**
 * kvm_emulate - emulate a privileged operation after a virtualization fault
 *
 * @vcpu: the virtual cpu
 * @regs: the guest trap frame
 *
 * The cause of the fault and the faulting opcode are taken from
 * VMX(vcpu, cause) and VMX(vcpu, opcode).
 */
void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
{
        unsigned long status, cause, opcode;
        INST64 inst;

        status = IA64_NO_FAULT;
        cause = VMX(vcpu, cause);
        opcode = VMX(vcpu, opcode);
        inst.inst = opcode;
        /*
         * Switch to actual virtual rid in rr0 and rr4,
         * which is required by some tlb related instructions.
         */
        prepare_if_physical_mode(vcpu);

        switch (cause) {
        case EVENT_RSM:
                kvm_rsm(vcpu, inst);
                break;
        case EVENT_SSM:
                kvm_ssm(vcpu, inst);
                break;
        case EVENT_MOV_TO_PSR:
                kvm_mov_to_psr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_PSR:
                kvm_mov_from_psr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_CR:
                kvm_mov_from_cr(vcpu, inst);
                break;
        case EVENT_MOV_TO_CR:
                kvm_mov_to_cr(vcpu, inst);
                break;
        case EVENT_BSW_0:
                vcpu_bsw0(vcpu);
                break;
        case EVENT_BSW_1:
                vcpu_bsw1(vcpu);
                break;
        case EVENT_COVER:
                vcpu_cover(vcpu);
                break;
        case EVENT_RFI:
                vcpu_rfi(vcpu);
                break;
        case EVENT_ITR_D:
                kvm_itr_d(vcpu, inst);
                break;
        case EVENT_ITR_I:
                kvm_itr_i(vcpu, inst);
                break;
        case EVENT_PTR_D:
                kvm_ptr_d(vcpu, inst);
                break;
        case EVENT_PTR_I:
                kvm_ptr_i(vcpu, inst);
                break;
        case EVENT_ITC_D:
                kvm_itc_d(vcpu, inst);
                break;
        case EVENT_ITC_I:
                kvm_itc_i(vcpu, inst);
                break;
        case EVENT_PTC_L:
                kvm_ptc_l(vcpu, inst);
                break;
        case EVENT_PTC_G:
                kvm_ptc_g(vcpu, inst);
                break;
        case EVENT_PTC_GA:
                kvm_ptc_ga(vcpu, inst);
                break;
        case EVENT_PTC_E:
                kvm_ptc_e(vcpu, inst);
                break;
        case EVENT_MOV_TO_RR:
                kvm_mov_to_rr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_RR:
                kvm_mov_from_rr(vcpu, inst);
                break;
        case EVENT_THASH:
                kvm_thash(vcpu, inst);
                break;
        case EVENT_TTAG:
                kvm_ttag(vcpu, inst);
                break;
        case EVENT_TPA:
                status = kvm_tpa(vcpu, inst);
                break;
        case EVENT_TAK:
                kvm_tak(vcpu, inst);
                break;
        case EVENT_MOV_TO_AR_IMM:
                kvm_mov_to_ar_imm(vcpu, inst);
                break;
        case EVENT_MOV_TO_AR:
                kvm_mov_to_ar_reg(vcpu, inst);
                break;
        case EVENT_MOV_FROM_AR:
                kvm_mov_from_ar_reg(vcpu, inst);
                break;
        case EVENT_MOV_TO_DBR:
                kvm_mov_to_dbr(vcpu, inst);
                break;
        case EVENT_MOV_TO_IBR:
                kvm_mov_to_ibr(vcpu, inst);
                break;
        case EVENT_MOV_TO_PMC:
                kvm_mov_to_pmc(vcpu, inst);
                break;
        case EVENT_MOV_TO_PMD:
                kvm_mov_to_pmd(vcpu, inst);
                break;
        case EVENT_MOV_TO_PKR:
                kvm_mov_to_pkr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_DBR:
                kvm_mov_from_dbr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_IBR:
                kvm_mov_from_ibr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_PMC:
                kvm_mov_from_pmc(vcpu, inst);
                break;
        case EVENT_MOV_FROM_PKR:
                kvm_mov_from_pkr(vcpu, inst);
                break;
        case EVENT_MOV_FROM_CPUID:
                kvm_mov_from_cpuid(vcpu, inst);
                break;
        case EVENT_VMSW:
                status = IA64_FAULT;
                break;
        default:
                break;
        }

        /*
         * Assume every case above leaves status == IA64_NO_FAULT unless it
         * sets it explicitly?  rfi updates cr.iip itself, so it must not be
         * advanced again here.
         */
        if (status == IA64_NO_FAULT && cause != EVENT_RFI)
                vcpu_increment_iip(vcpu);

        recover_if_physical_mode(vcpu);
}
void init_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        vcpu->arch.mode_flags = GUEST_IN_PHY;
        VMX(vcpu, vrr[0]) = 0x38;
        VMX(vcpu, vrr[1]) = 0x38;
        VMX(vcpu, vrr[2]) = 0x38;
        VMX(vcpu, vrr[3]) = 0x38;
        VMX(vcpu, vrr[4]) = 0x38;
        VMX(vcpu, vrr[5]) = 0x38;
        VMX(vcpu, vrr[6]) = 0x38;
        VMX(vcpu, vrr[7]) = 0x38;
        VCPU(vcpu, vpsr) = IA64_PSR_BN;
        VCPU(vcpu, dcr) = 0;
        /* pta.size must not be 0.  The minimum is 15 (a 32KB VHPT);
         * the size field sits at pta{7:2}, hence the shift by 2. */
        VCPU(vcpu, pta) = 15 << 2;
        VCPU(vcpu, itv) = 0x10000;      /* bit 16 is the mask bit */
        VCPU(vcpu, itm) = 0;
        VMX(vcpu, last_itc) = 0;
        VCPU(vcpu, lid) = VCPU_LID(vcpu);
        VCPU(vcpu, ivr) = 0;
        VCPU(vcpu, tpr) = 0x10000;
        VCPU(vcpu, eoi) = 0;
        VCPU(vcpu, irr[0]) = 0;
        VCPU(vcpu, irr[1]) = 0;
        VCPU(vcpu, irr[2]) = 0;
        VCPU(vcpu, irr[3]) = 0;
        VCPU(vcpu, pmv) = 0x10000;
        VCPU(vcpu, cmcv) = 0x10000;
        VCPU(vcpu, lrr0) = 0x10000;     /* default reset value? */
        VCPU(vcpu, lrr1) = 0x10000;     /* default reset value? */
        update_vhpi(vcpu, NULL_VECTOR);
        VLSAPIC_XTP(vcpu) = 0x80;       /* disabled */
        for (i = 0; i < 4; i++)
                VLSAPIC_INSVC(vcpu, i) = 0;
}
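/*
 * kvm_init_all_rr() below loads the machine region registers from the
 * guest's vrr values via vrrtomrr(); rr0 and rr4 get the metaphysical
 * values instead while the guest is in physical mode.  rr6 is left
 * untouched here, presumably because that region belongs to the VMM
 * itself (a guess; the file does not say).
 */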
void kvm_init_all_rr(struct kvm_vcpu *vcpu)
{
        unsigned long psr;

        local_irq_save(psr);

        /* WARNING: virtual mode and physical mode must not coexist
         * in the same region.
         */
        vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
        vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));

        if (is_physical_mode(vcpu)) {
                if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
                        panic_vm(vcpu, "Machine Status conflicts!\n");
                ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
                ia64_dv_serialize_data();
                ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
                ia64_dv_serialize_data();
        } else {
                ia64_set_rr((VRN0 << VRN_SHIFT),
                                vcpu->arch.metaphysical_saved_rr0);
                ia64_dv_serialize_data();
                ia64_set_rr((VRN4 << VRN_SHIFT),
                                vcpu->arch.metaphysical_saved_rr4);
                ia64_dv_serialize_data();
        }
        ia64_set_rr((VRN1 << VRN_SHIFT),
                        vrrtomrr(VMX(vcpu, vrr[VRN1])));
        ia64_dv_serialize_data();
        ia64_set_rr((VRN2 << VRN_SHIFT),
                        vrrtomrr(VMX(vcpu, vrr[VRN2])));
        ia64_dv_serialize_data();
        ia64_set_rr((VRN3 << VRN_SHIFT),
                        vrrtomrr(VMX(vcpu, vrr[VRN3])));
        ia64_dv_serialize_data();
        ia64_set_rr((VRN5 << VRN_SHIFT),
                        vrrtomrr(VMX(vcpu, vrr[VRN5])));
        ia64_dv_serialize_data();
        ia64_set_rr((VRN7 << VRN_SHIFT),
                        vrrtomrr(VMX(vcpu, vrr[VRN7])));
        ia64_dv_serialize_data();
        ia64_srlz_d();
        ia64_set_psr(psr);
}
int vmm_entry(void)
{
        struct kvm_vcpu *v;

        v = current_vcpu;
        ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
                        0, 0, 0, 0, 0, 0);
        kvm_init_vtlb(v);
        kvm_init_vhpt(v);
        init_vcpu(v);
        kvm_init_all_rr(v);
        vmm_reset_entry();
        return 0;
}
static void kvm_show_registers(struct kvm_pt_regs *regs)
{
        unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
        struct kvm_vcpu *vcpu = current_vcpu;

        if (vcpu != NULL)
                printk("vcpu 0x%p vcpu %d\n",
                                vcpu, vcpu->vcpu_id);

        printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n",
                        regs->cr_ipsr, regs->cr_ifs, ip);
        printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
                        regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
        printk("rnat: %016lx bspstore: %016lx pr : %016lx\n",
                        regs->ar_rnat, regs->ar_bspstore, regs->pr);
        printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
                        regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
        printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
        printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0,
                        regs->b6, regs->b7);
        printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
                        regs->f6.u.bits[1], regs->f6.u.bits[0],
                        regs->f7.u.bits[1], regs->f7.u.bits[0]);
        printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
                        regs->f8.u.bits[1], regs->f8.u.bits[0],
                        regs->f9.u.bits[1], regs->f9.u.bits[0]);
        printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
                        regs->f10.u.bits[1], regs->f10.u.bits[0],
                        regs->f11.u.bits[1], regs->f11.u.bits[0]);
        printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1,
                        regs->r2, regs->r3);
        printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8,
                        regs->r9, regs->r10);
        printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
                        regs->r12, regs->r13);
        printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
                        regs->r15, regs->r16);
        printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
                        regs->r18, regs->r19);
        printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
                        regs->r21, regs->r22);
        printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
                        regs->r24, regs->r25);
        printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
                        regs->r27, regs->r28);
        printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
                        regs->r30, regs->r31);
}
void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
{
        va_list args;
        char buf[256];

        struct kvm_pt_regs *regs = vcpu_regs(v);
        struct exit_ctl_data *p = &v->arch.exit_data;

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        /* print via "%s" so a '%' in the formatted message cannot be
         * re-interpreted as a format specifier */
        printk("%s", buf);
        kvm_show_registers(regs);
        p->exit_reason = EXIT_REASON_VM_PANIC;
        vmm_transition(v);
        /* Never returns. */
        while (1);
}