vcpu.c 48 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163
  1. /*
  2. * kvm_vcpu.c: handling all virtual cpu related thing.
  3. * Copyright (c) 2005, Intel Corporation.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  16. * Place - Suite 330, Boston, MA 02111-1307 USA.
  17. *
  18. * Shaofan Li (Susue Li) <susie.li@intel.com>
  19. * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
  20. * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
  21. * Xiantao Zhang <xiantao.zhang@intel.com>
  22. */
  23. #include <linux/kvm_host.h>
  24. #include <linux/types.h>
  25. #include <asm/processor.h>
  26. #include <asm/ia64regs.h>
  27. #include <asm/gcc_intrin.h>
  28. #include <asm/kregs.h>
  29. #include <asm/pgtable.h>
  30. #include <asm/tlb.h>
  31. #include "asm-offsets.h"
  32. #include "vcpu.h"
  33. /*
  34. * Special notes:
  35. * - Index by it/dt/rt sequence
  36. * - Only existing mode transitions are allowed in this table
  37. * - RSE is placed at lazy mode when emulating guest partial mode
  38. * - If gva happens to be rr0 and rr4, only allowed case is identity
  39. * mapping (gva=gpa), or panic! (How?)
  40. */
  41. int mm_switch_table[8][8] = {
  42. /* 2004/09/12(Kevin): Allow switch to self */
  43. /*
  44. * (it,dt,rt): (0,0,0) -> (1,1,1)
  45. * This kind of transition usually occurs in the very early
  46. * stage of Linux boot up procedure. Another case is in efi
  47. * and pal calls. (see "arch/ia64/kernel/head.S")
  48. *
  49. * (it,dt,rt): (0,0,0) -> (0,1,1)
  50. * This kind of transition is found when OSYa exits efi boot
  51. * service. Due to gva = gpa in this case (Same region),
  52. * data access can be satisfied though itlb entry for physical
  53. * emulation is hit.
  54. */
  55. {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
  56. {0, 0, 0, 0, 0, 0, 0, 0},
  57. {0, 0, 0, 0, 0, 0, 0, 0},
  58. /*
  59. * (it,dt,rt): (0,1,1) -> (1,1,1)
  60. * This kind of transition is found in OSYa.
  61. *
  62. * (it,dt,rt): (0,1,1) -> (0,0,0)
  63. * This kind of transition is found in OSYa
  64. */
  65. {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
  66. /* (1,0,0)->(1,1,1) */
  67. {0, 0, 0, 0, 0, 0, 0, SW_P2V},
  68. /*
  69. * (it,dt,rt): (1,0,1) -> (1,1,1)
  70. * This kind of transition usually occurs when Linux returns
  71. * from the low level TLB miss handlers.
  72. * (see "arch/ia64/kernel/ivt.S")
  73. */
  74. {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
  75. {0, 0, 0, 0, 0, 0, 0, 0},
  76. /*
  77. * (it,dt,rt): (1,1,1) -> (1,0,1)
  78. * This kind of transition usually occurs in Linux low level
  79. * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
  80. *
  81. * (it,dt,rt): (1,1,1) -> (0,0,0)
  82. * This kind of transition usually occurs in pal and efi calls,
  83. * which requires running in physical mode.
  84. * (see "arch/ia64/kernel/head.S")
  85. * (1,1,1)->(1,0,0)
  86. */
  87. {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
  88. };
  89. void physical_mode_init(struct kvm_vcpu *vcpu)
  90. {
  91. vcpu->arch.mode_flags = GUEST_IN_PHY;
  92. }
  93. void switch_to_physical_rid(struct kvm_vcpu *vcpu)
  94. {
  95. unsigned long psr;
  96. /* Save original virtual mode rr[0] and rr[4] */
  97. psr = ia64_clear_ic();
  98. ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
  99. ia64_srlz_d();
  100. ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
  101. ia64_srlz_d();
  102. ia64_set_psr(psr);
  103. return;
  104. }
  105. void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
  106. {
  107. unsigned long psr;
  108. psr = ia64_clear_ic();
  109. ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
  110. ia64_srlz_d();
  111. ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
  112. ia64_srlz_d();
  113. ia64_set_psr(psr);
  114. return;
  115. }
  116. static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
  117. {
  118. return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
  119. }
  120. void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
  121. struct ia64_psr new_psr)
  122. {
  123. int act;
  124. act = mm_switch_action(old_psr, new_psr);
  125. switch (act) {
  126. case SW_V2P:
  127. /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
  128. old_psr.val, new_psr.val);*/
  129. switch_to_physical_rid(vcpu);
  130. /*
  131. * Set rse to enforced lazy, to prevent active rse
  132. *save/restor when guest physical mode.
  133. */
  134. vcpu->arch.mode_flags |= GUEST_IN_PHY;
  135. break;
  136. case SW_P2V:
  137. switch_to_virtual_rid(vcpu);
  138. /*
  139. * recover old mode which is saved when entering
  140. * guest physical mode
  141. */
  142. vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
  143. break;
  144. case SW_SELF:
  145. break;
  146. case SW_NOP:
  147. break;
  148. default:
  149. /* Sanity check */
  150. break;
  151. }
  152. return;
  153. }
  154. /*
  155. * In physical mode, insert tc/tr for region 0 and 4 uses
  156. * RID[0] and RID[4] which is for physical mode emulation.
  157. * However what those inserted tc/tr wants is rid for
  158. * virtual mode. So original virtual rid needs to be restored
  159. * before insert.
  160. *
  161. * Operations which required such switch include:
  162. * - insertions (itc.*, itr.*)
  163. * - purges (ptc.* and ptr.*)
  164. * - tpa
  165. * - tak
  166. * - thash?, ttag?
  167. * All above needs actual virtual rid for destination entry.
  168. */
  169. void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
  170. struct ia64_psr new_psr)
  171. {
  172. if ((old_psr.dt != new_psr.dt)
  173. || (old_psr.it != new_psr.it)
  174. || (old_psr.rt != new_psr.rt))
  175. switch_mm_mode(vcpu, old_psr, new_psr);
  176. return;
  177. }
  178. /*
  179. * In physical mode, insert tc/tr for region 0 and 4 uses
  180. * RID[0] and RID[4] which is for physical mode emulation.
  181. * However what those inserted tc/tr wants is rid for
  182. * virtual mode. So original virtual rid needs to be restored
  183. * before insert.
  184. *
  185. * Operations which required such switch include:
  186. * - insertions (itc.*, itr.*)
  187. * - purges (ptc.* and ptr.*)
  188. * - tpa
  189. * - tak
  190. * - thash?, ttag?
  191. * All above needs actual virtual rid for destination entry.
  192. */
  193. void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
  194. {
  195. if (is_physical_mode(vcpu)) {
  196. vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
  197. switch_to_virtual_rid(vcpu);
  198. }
  199. return;
  200. }
  201. /* Recover always follows prepare */
  202. void recover_if_physical_mode(struct kvm_vcpu *vcpu)
  203. {
  204. if (is_physical_mode(vcpu))
  205. switch_to_physical_rid(vcpu);
  206. vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
  207. return;
  208. }
  209. #define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)
  210. static u16 gr_info[32] = {
  211. 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */
  212. RPT(r1), RPT(r2), RPT(r3),
  213. RPT(r4), RPT(r5), RPT(r6), RPT(r7),
  214. RPT(r8), RPT(r9), RPT(r10), RPT(r11),
  215. RPT(r12), RPT(r13), RPT(r14), RPT(r15),
  216. RPT(r16), RPT(r17), RPT(r18), RPT(r19),
  217. RPT(r20), RPT(r21), RPT(r22), RPT(r23),
  218. RPT(r24), RPT(r25), RPT(r26), RPT(r27),
  219. RPT(r28), RPT(r29), RPT(r30), RPT(r31)
  220. };
  221. #define IA64_FIRST_STACKED_GR 32
  222. #define IA64_FIRST_ROTATING_FR 32
  223. static inline unsigned long
  224. rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
  225. {
  226. reg += rrb;
  227. if (reg >= sor)
  228. reg -= sor;
  229. return reg;
  230. }
  231. /*
  232. * Return the (rotated) index for floating point register
  233. * be in the REGNUM (REGNUM must range from 32-127,
  234. * result is in the range from 0-95.
  235. */
  236. static inline unsigned long fph_index(struct kvm_pt_regs *regs,
  237. long regnum)
  238. {
  239. unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
  240. return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
  241. }
  242. /*
  243. * The inverse of the above: given bspstore and the number of
  244. * registers, calculate ar.bsp.
  245. */
  246. static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
  247. long num_regs)
  248. {
  249. long delta = ia64_rse_slot_num(addr) + num_regs;
  250. int i = 0;
  251. if (num_regs < 0)
  252. delta -= 0x3e;
  253. if (delta < 0) {
  254. while (delta <= -0x3f) {
  255. i--;
  256. delta += 0x3f;
  257. }
  258. } else {
  259. while (delta >= 0x3f) {
  260. i++;
  261. delta -= 0x3f;
  262. }
  263. }
  264. return addr + num_regs + i;
  265. }
  266. static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
  267. unsigned long *val, int *nat)
  268. {
  269. unsigned long *bsp, *addr, *rnat_addr, *bspstore;
  270. unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
  271. unsigned long nat_mask;
  272. unsigned long old_rsc, new_rsc;
  273. long sof = (regs->cr_ifs) & 0x7f;
  274. long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
  275. long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
  276. long ridx = r1 - 32;
  277. if (ridx < sor)
  278. ridx = rotate_reg(sor, rrb_gr, ridx);
  279. old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
  280. new_rsc = old_rsc&(~(0x3));
  281. ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
  282. bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
  283. bsp = kbs + (regs->loadrs >> 19);
  284. addr = kvm_rse_skip_regs(bsp, -sof + ridx);
  285. nat_mask = 1UL << ia64_rse_slot_num(addr);
  286. rnat_addr = ia64_rse_rnat_addr(addr);
  287. if (addr >= bspstore) {
  288. ia64_flushrs();
  289. ia64_mf();
  290. bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
  291. }
  292. *val = *addr;
  293. if (nat) {
  294. if (bspstore < rnat_addr)
  295. *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
  296. & nat_mask);
  297. else
  298. *nat = (int)!!((*rnat_addr) & nat_mask);
  299. ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
  300. }
  301. }
  302. void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
  303. unsigned long val, unsigned long nat)
  304. {
  305. unsigned long *bsp, *bspstore, *addr, *rnat_addr;
  306. unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
  307. unsigned long nat_mask;
  308. unsigned long old_rsc, new_rsc, psr;
  309. unsigned long rnat;
  310. long sof = (regs->cr_ifs) & 0x7f;
  311. long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
  312. long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
  313. long ridx = r1 - 32;
  314. if (ridx < sor)
  315. ridx = rotate_reg(sor, rrb_gr, ridx);
  316. old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
  317. /* put RSC to lazy mode, and set loadrs 0 */
  318. new_rsc = old_rsc & (~0x3fff0003);
  319. ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
  320. bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
  321. addr = kvm_rse_skip_regs(bsp, -sof + ridx);
  322. nat_mask = 1UL << ia64_rse_slot_num(addr);
  323. rnat_addr = ia64_rse_rnat_addr(addr);
  324. local_irq_save(psr);
  325. bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
  326. if (addr >= bspstore) {
  327. ia64_flushrs();
  328. ia64_mf();
  329. *addr = val;
  330. bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
  331. rnat = ia64_getreg(_IA64_REG_AR_RNAT);
  332. if (bspstore < rnat_addr)
  333. rnat = rnat & (~nat_mask);
  334. else
  335. *rnat_addr = (*rnat_addr)&(~nat_mask);
  336. ia64_mf();
  337. ia64_loadrs();
  338. ia64_setreg(_IA64_REG_AR_RNAT, rnat);
  339. } else {
  340. rnat = ia64_getreg(_IA64_REG_AR_RNAT);
  341. *addr = val;
  342. if (bspstore < rnat_addr)
  343. rnat = rnat&(~nat_mask);
  344. else
  345. *rnat_addr = (*rnat_addr) & (~nat_mask);
  346. ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
  347. ia64_setreg(_IA64_REG_AR_RNAT, rnat);
  348. }
  349. local_irq_restore(psr);
  350. ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
  351. }
  352. void getreg(unsigned long regnum, unsigned long *val,
  353. int *nat, struct kvm_pt_regs *regs)
  354. {
  355. unsigned long addr, *unat;
  356. if (regnum >= IA64_FIRST_STACKED_GR) {
  357. get_rse_reg(regs, regnum, val, nat);
  358. return;
  359. }
  360. /*
  361. * Now look at registers in [0-31] range and init correct UNAT
  362. */
  363. addr = (unsigned long)regs;
  364. unat = &regs->eml_unat;;
  365. addr += gr_info[regnum];
  366. *val = *(unsigned long *)addr;
  367. /*
  368. * do it only when requested
  369. */
  370. if (nat)
  371. *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
  372. }
  373. void setreg(unsigned long regnum, unsigned long val,
  374. int nat, struct kvm_pt_regs *regs)
  375. {
  376. unsigned long addr;
  377. unsigned long bitmask;
  378. unsigned long *unat;
  379. /*
  380. * First takes care of stacked registers
  381. */
  382. if (regnum >= IA64_FIRST_STACKED_GR) {
  383. set_rse_reg(regs, regnum, val, nat);
  384. return;
  385. }
  386. /*
  387. * Now look at registers in [0-31] range and init correct UNAT
  388. */
  389. addr = (unsigned long)regs;
  390. unat = &regs->eml_unat;
  391. /*
  392. * add offset from base of struct
  393. * and do it !
  394. */
  395. addr += gr_info[regnum];
  396. *(unsigned long *)addr = val;
  397. /*
  398. * We need to clear the corresponding UNAT bit to fully emulate the load
  399. * UNAT bit_pos = GR[r3]{8:3} form EAS-2.4
  400. */
  401. bitmask = 1UL << ((addr >> 3) & 0x3f);
  402. if (nat)
  403. *unat |= bitmask;
  404. else
  405. *unat &= ~bitmask;
  406. }
  407. u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
  408. {
  409. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  410. u64 val;
  411. if (!reg)
  412. return 0;
  413. getreg(reg, &val, 0, regs);
  414. return val;
  415. }
  416. void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
  417. {
  418. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  419. long sof = (regs->cr_ifs) & 0x7f;
  420. if (!reg)
  421. return;
  422. if (reg >= sof + 32)
  423. return;
  424. setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/
  425. }
  426. void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
  427. struct kvm_pt_regs *regs)
  428. {
  429. /* Take floating register rotation into consideration*/
  430. if (regnum >= IA64_FIRST_ROTATING_FR)
  431. regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
  432. #define CASE_FIXED_FP(reg) \
  433. case (reg) : \
  434. ia64_stf_spill(fpval, reg); \
  435. break
  436. switch (regnum) {
  437. CASE_FIXED_FP(0);
  438. CASE_FIXED_FP(1);
  439. CASE_FIXED_FP(2);
  440. CASE_FIXED_FP(3);
  441. CASE_FIXED_FP(4);
  442. CASE_FIXED_FP(5);
  443. CASE_FIXED_FP(6);
  444. CASE_FIXED_FP(7);
  445. CASE_FIXED_FP(8);
  446. CASE_FIXED_FP(9);
  447. CASE_FIXED_FP(10);
  448. CASE_FIXED_FP(11);
  449. CASE_FIXED_FP(12);
  450. CASE_FIXED_FP(13);
  451. CASE_FIXED_FP(14);
  452. CASE_FIXED_FP(15);
  453. CASE_FIXED_FP(16);
  454. CASE_FIXED_FP(17);
  455. CASE_FIXED_FP(18);
  456. CASE_FIXED_FP(19);
  457. CASE_FIXED_FP(20);
  458. CASE_FIXED_FP(21);
  459. CASE_FIXED_FP(22);
  460. CASE_FIXED_FP(23);
  461. CASE_FIXED_FP(24);
  462. CASE_FIXED_FP(25);
  463. CASE_FIXED_FP(26);
  464. CASE_FIXED_FP(27);
  465. CASE_FIXED_FP(28);
  466. CASE_FIXED_FP(29);
  467. CASE_FIXED_FP(30);
  468. CASE_FIXED_FP(31);
  469. CASE_FIXED_FP(32);
  470. CASE_FIXED_FP(33);
  471. CASE_FIXED_FP(34);
  472. CASE_FIXED_FP(35);
  473. CASE_FIXED_FP(36);
  474. CASE_FIXED_FP(37);
  475. CASE_FIXED_FP(38);
  476. CASE_FIXED_FP(39);
  477. CASE_FIXED_FP(40);
  478. CASE_FIXED_FP(41);
  479. CASE_FIXED_FP(42);
  480. CASE_FIXED_FP(43);
  481. CASE_FIXED_FP(44);
  482. CASE_FIXED_FP(45);
  483. CASE_FIXED_FP(46);
  484. CASE_FIXED_FP(47);
  485. CASE_FIXED_FP(48);
  486. CASE_FIXED_FP(49);
  487. CASE_FIXED_FP(50);
  488. CASE_FIXED_FP(51);
  489. CASE_FIXED_FP(52);
  490. CASE_FIXED_FP(53);
  491. CASE_FIXED_FP(54);
  492. CASE_FIXED_FP(55);
  493. CASE_FIXED_FP(56);
  494. CASE_FIXED_FP(57);
  495. CASE_FIXED_FP(58);
  496. CASE_FIXED_FP(59);
  497. CASE_FIXED_FP(60);
  498. CASE_FIXED_FP(61);
  499. CASE_FIXED_FP(62);
  500. CASE_FIXED_FP(63);
  501. CASE_FIXED_FP(64);
  502. CASE_FIXED_FP(65);
  503. CASE_FIXED_FP(66);
  504. CASE_FIXED_FP(67);
  505. CASE_FIXED_FP(68);
  506. CASE_FIXED_FP(69);
  507. CASE_FIXED_FP(70);
  508. CASE_FIXED_FP(71);
  509. CASE_FIXED_FP(72);
  510. CASE_FIXED_FP(73);
  511. CASE_FIXED_FP(74);
  512. CASE_FIXED_FP(75);
  513. CASE_FIXED_FP(76);
  514. CASE_FIXED_FP(77);
  515. CASE_FIXED_FP(78);
  516. CASE_FIXED_FP(79);
  517. CASE_FIXED_FP(80);
  518. CASE_FIXED_FP(81);
  519. CASE_FIXED_FP(82);
  520. CASE_FIXED_FP(83);
  521. CASE_FIXED_FP(84);
  522. CASE_FIXED_FP(85);
  523. CASE_FIXED_FP(86);
  524. CASE_FIXED_FP(87);
  525. CASE_FIXED_FP(88);
  526. CASE_FIXED_FP(89);
  527. CASE_FIXED_FP(90);
  528. CASE_FIXED_FP(91);
  529. CASE_FIXED_FP(92);
  530. CASE_FIXED_FP(93);
  531. CASE_FIXED_FP(94);
  532. CASE_FIXED_FP(95);
  533. CASE_FIXED_FP(96);
  534. CASE_FIXED_FP(97);
  535. CASE_FIXED_FP(98);
  536. CASE_FIXED_FP(99);
  537. CASE_FIXED_FP(100);
  538. CASE_FIXED_FP(101);
  539. CASE_FIXED_FP(102);
  540. CASE_FIXED_FP(103);
  541. CASE_FIXED_FP(104);
  542. CASE_FIXED_FP(105);
  543. CASE_FIXED_FP(106);
  544. CASE_FIXED_FP(107);
  545. CASE_FIXED_FP(108);
  546. CASE_FIXED_FP(109);
  547. CASE_FIXED_FP(110);
  548. CASE_FIXED_FP(111);
  549. CASE_FIXED_FP(112);
  550. CASE_FIXED_FP(113);
  551. CASE_FIXED_FP(114);
  552. CASE_FIXED_FP(115);
  553. CASE_FIXED_FP(116);
  554. CASE_FIXED_FP(117);
  555. CASE_FIXED_FP(118);
  556. CASE_FIXED_FP(119);
  557. CASE_FIXED_FP(120);
  558. CASE_FIXED_FP(121);
  559. CASE_FIXED_FP(122);
  560. CASE_FIXED_FP(123);
  561. CASE_FIXED_FP(124);
  562. CASE_FIXED_FP(125);
  563. CASE_FIXED_FP(126);
  564. CASE_FIXED_FP(127);
  565. }
  566. #undef CASE_FIXED_FP
  567. }
  568. void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
  569. struct kvm_pt_regs *regs)
  570. {
  571. /* Take floating register rotation into consideration*/
  572. if (regnum >= IA64_FIRST_ROTATING_FR)
  573. regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
  574. #define CASE_FIXED_FP(reg) \
  575. case (reg) : \
  576. ia64_ldf_fill(reg, fpval); \
  577. break
  578. switch (regnum) {
  579. CASE_FIXED_FP(2);
  580. CASE_FIXED_FP(3);
  581. CASE_FIXED_FP(4);
  582. CASE_FIXED_FP(5);
  583. CASE_FIXED_FP(6);
  584. CASE_FIXED_FP(7);
  585. CASE_FIXED_FP(8);
  586. CASE_FIXED_FP(9);
  587. CASE_FIXED_FP(10);
  588. CASE_FIXED_FP(11);
  589. CASE_FIXED_FP(12);
  590. CASE_FIXED_FP(13);
  591. CASE_FIXED_FP(14);
  592. CASE_FIXED_FP(15);
  593. CASE_FIXED_FP(16);
  594. CASE_FIXED_FP(17);
  595. CASE_FIXED_FP(18);
  596. CASE_FIXED_FP(19);
  597. CASE_FIXED_FP(20);
  598. CASE_FIXED_FP(21);
  599. CASE_FIXED_FP(22);
  600. CASE_FIXED_FP(23);
  601. CASE_FIXED_FP(24);
  602. CASE_FIXED_FP(25);
  603. CASE_FIXED_FP(26);
  604. CASE_FIXED_FP(27);
  605. CASE_FIXED_FP(28);
  606. CASE_FIXED_FP(29);
  607. CASE_FIXED_FP(30);
  608. CASE_FIXED_FP(31);
  609. CASE_FIXED_FP(32);
  610. CASE_FIXED_FP(33);
  611. CASE_FIXED_FP(34);
  612. CASE_FIXED_FP(35);
  613. CASE_FIXED_FP(36);
  614. CASE_FIXED_FP(37);
  615. CASE_FIXED_FP(38);
  616. CASE_FIXED_FP(39);
  617. CASE_FIXED_FP(40);
  618. CASE_FIXED_FP(41);
  619. CASE_FIXED_FP(42);
  620. CASE_FIXED_FP(43);
  621. CASE_FIXED_FP(44);
  622. CASE_FIXED_FP(45);
  623. CASE_FIXED_FP(46);
  624. CASE_FIXED_FP(47);
  625. CASE_FIXED_FP(48);
  626. CASE_FIXED_FP(49);
  627. CASE_FIXED_FP(50);
  628. CASE_FIXED_FP(51);
  629. CASE_FIXED_FP(52);
  630. CASE_FIXED_FP(53);
  631. CASE_FIXED_FP(54);
  632. CASE_FIXED_FP(55);
  633. CASE_FIXED_FP(56);
  634. CASE_FIXED_FP(57);
  635. CASE_FIXED_FP(58);
  636. CASE_FIXED_FP(59);
  637. CASE_FIXED_FP(60);
  638. CASE_FIXED_FP(61);
  639. CASE_FIXED_FP(62);
  640. CASE_FIXED_FP(63);
  641. CASE_FIXED_FP(64);
  642. CASE_FIXED_FP(65);
  643. CASE_FIXED_FP(66);
  644. CASE_FIXED_FP(67);
  645. CASE_FIXED_FP(68);
  646. CASE_FIXED_FP(69);
  647. CASE_FIXED_FP(70);
  648. CASE_FIXED_FP(71);
  649. CASE_FIXED_FP(72);
  650. CASE_FIXED_FP(73);
  651. CASE_FIXED_FP(74);
  652. CASE_FIXED_FP(75);
  653. CASE_FIXED_FP(76);
  654. CASE_FIXED_FP(77);
  655. CASE_FIXED_FP(78);
  656. CASE_FIXED_FP(79);
  657. CASE_FIXED_FP(80);
  658. CASE_FIXED_FP(81);
  659. CASE_FIXED_FP(82);
  660. CASE_FIXED_FP(83);
  661. CASE_FIXED_FP(84);
  662. CASE_FIXED_FP(85);
  663. CASE_FIXED_FP(86);
  664. CASE_FIXED_FP(87);
  665. CASE_FIXED_FP(88);
  666. CASE_FIXED_FP(89);
  667. CASE_FIXED_FP(90);
  668. CASE_FIXED_FP(91);
  669. CASE_FIXED_FP(92);
  670. CASE_FIXED_FP(93);
  671. CASE_FIXED_FP(94);
  672. CASE_FIXED_FP(95);
  673. CASE_FIXED_FP(96);
  674. CASE_FIXED_FP(97);
  675. CASE_FIXED_FP(98);
  676. CASE_FIXED_FP(99);
  677. CASE_FIXED_FP(100);
  678. CASE_FIXED_FP(101);
  679. CASE_FIXED_FP(102);
  680. CASE_FIXED_FP(103);
  681. CASE_FIXED_FP(104);
  682. CASE_FIXED_FP(105);
  683. CASE_FIXED_FP(106);
  684. CASE_FIXED_FP(107);
  685. CASE_FIXED_FP(108);
  686. CASE_FIXED_FP(109);
  687. CASE_FIXED_FP(110);
  688. CASE_FIXED_FP(111);
  689. CASE_FIXED_FP(112);
  690. CASE_FIXED_FP(113);
  691. CASE_FIXED_FP(114);
  692. CASE_FIXED_FP(115);
  693. CASE_FIXED_FP(116);
  694. CASE_FIXED_FP(117);
  695. CASE_FIXED_FP(118);
  696. CASE_FIXED_FP(119);
  697. CASE_FIXED_FP(120);
  698. CASE_FIXED_FP(121);
  699. CASE_FIXED_FP(122);
  700. CASE_FIXED_FP(123);
  701. CASE_FIXED_FP(124);
  702. CASE_FIXED_FP(125);
  703. CASE_FIXED_FP(126);
  704. CASE_FIXED_FP(127);
  705. }
  706. }
  707. void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
  708. struct ia64_fpreg *val)
  709. {
  710. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  711. getfpreg(reg, val, regs); /* FIXME: handle NATs later*/
  712. }
  713. void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
  714. struct ia64_fpreg *val)
  715. {
  716. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  717. if (reg > 1)
  718. setfpreg(reg, val, regs); /* FIXME: handle NATs later*/
  719. }
  720. /************************************************************************
  721. * lsapic timer
  722. ***********************************************************************/
  723. u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
  724. {
  725. unsigned long guest_itc;
  726. guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
  727. if (guest_itc >= VMX(vcpu, last_itc)) {
  728. VMX(vcpu, last_itc) = guest_itc;
  729. return guest_itc;
  730. } else
  731. return VMX(vcpu, last_itc);
  732. }
  733. static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
  734. static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
  735. {
  736. struct kvm_vcpu *v;
  737. int i;
  738. long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
  739. unsigned long vitv = VCPU(vcpu, itv);
  740. if (vcpu->vcpu_id == 0) {
  741. for (i = 0; i < MAX_VCPU_NUM; i++) {
  742. v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
  743. VMX(v, itc_offset) = itc_offset;
  744. VMX(v, last_itc) = 0;
  745. }
  746. }
  747. VMX(vcpu, last_itc) = 0;
  748. if (VCPU(vcpu, itm) <= val) {
  749. VMX(vcpu, itc_check) = 0;
  750. vcpu_unpend_interrupt(vcpu, vitv);
  751. } else {
  752. VMX(vcpu, itc_check) = 1;
  753. vcpu_set_itm(vcpu, VCPU(vcpu, itm));
  754. }
  755. }
  756. static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
  757. {
  758. return ((u64)VCPU(vcpu, itm));
  759. }
  760. static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
  761. {
  762. unsigned long vitv = VCPU(vcpu, itv);
  763. VCPU(vcpu, itm) = val;
  764. if (val > vcpu_get_itc(vcpu)) {
  765. VMX(vcpu, itc_check) = 1;
  766. vcpu_unpend_interrupt(vcpu, vitv);
  767. VMX(vcpu, timer_pending) = 0;
  768. } else
  769. VMX(vcpu, itc_check) = 0;
  770. }
  771. #define ITV_VECTOR(itv) (itv&0xff)
  772. #define ITV_IRQ_MASK(itv) (itv&(1<<16))
  773. static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
  774. {
  775. VCPU(vcpu, itv) = val;
  776. if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
  777. vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
  778. vcpu->arch.timer_pending = 0;
  779. }
  780. }
  781. static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
  782. {
  783. int vec;
  784. vec = highest_inservice_irq(vcpu);
  785. if (vec == NULL_VECTOR)
  786. return;
  787. VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
  788. VCPU(vcpu, eoi) = 0;
  789. vcpu->arch.irq_new_pending = 1;
  790. }
  791. /* See Table 5-8 in SDM vol2 for the definition */
  792. int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
  793. {
  794. union ia64_tpr vtpr;
  795. vtpr.val = VCPU(vcpu, tpr);
  796. if (h_inservice == NMI_VECTOR)
  797. return IRQ_MASKED_BY_INSVC;
  798. if (h_pending == NMI_VECTOR) {
  799. /* Non Maskable Interrupt */
  800. return IRQ_NO_MASKED;
  801. }
  802. if (h_inservice == ExtINT_VECTOR)
  803. return IRQ_MASKED_BY_INSVC;
  804. if (h_pending == ExtINT_VECTOR) {
  805. if (vtpr.mmi) {
  806. /* mask all external IRQ */
  807. return IRQ_MASKED_BY_VTPR;
  808. } else
  809. return IRQ_NO_MASKED;
  810. }
  811. if (is_higher_irq(h_pending, h_inservice)) {
  812. if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
  813. return IRQ_NO_MASKED;
  814. else
  815. return IRQ_MASKED_BY_VTPR;
  816. } else {
  817. return IRQ_MASKED_BY_INSVC;
  818. }
  819. }
  820. void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
  821. {
  822. long spsr;
  823. int ret;
  824. local_irq_save(spsr);
  825. ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
  826. local_irq_restore(spsr);
  827. vcpu->arch.irq_new_pending = 1;
  828. }
  829. void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
  830. {
  831. long spsr;
  832. int ret;
  833. local_irq_save(spsr);
  834. ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
  835. local_irq_restore(spsr);
  836. if (ret) {
  837. vcpu->arch.irq_new_pending = 1;
  838. wmb();
  839. }
  840. }
  841. void update_vhpi(struct kvm_vcpu *vcpu, int vec)
  842. {
  843. u64 vhpi;
  844. if (vec == NULL_VECTOR)
  845. vhpi = 0;
  846. else if (vec == NMI_VECTOR)
  847. vhpi = 32;
  848. else if (vec == ExtINT_VECTOR)
  849. vhpi = 16;
  850. else
  851. vhpi = vec >> 4;
  852. VCPU(vcpu, vhpi) = vhpi;
  853. if (VCPU(vcpu, vac).a_int)
  854. ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
  855. (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
  856. }
  857. u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
  858. {
  859. int vec, h_inservice, mask;
  860. vec = highest_pending_irq(vcpu);
  861. h_inservice = highest_inservice_irq(vcpu);
  862. mask = irq_masked(vcpu, vec, h_inservice);
  863. if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
  864. if (VCPU(vcpu, vhpi))
  865. update_vhpi(vcpu, NULL_VECTOR);
  866. return IA64_SPURIOUS_INT_VECTOR;
  867. }
  868. if (mask == IRQ_MASKED_BY_VTPR) {
  869. update_vhpi(vcpu, vec);
  870. return IA64_SPURIOUS_INT_VECTOR;
  871. }
  872. VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
  873. vcpu_unpend_interrupt(vcpu, vec);
  874. return (u64)vec;
  875. }
  876. /**************************************************************************
  877. Privileged operation emulation routines
  878. **************************************************************************/
  879. u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
  880. {
  881. union ia64_pta vpta;
  882. union ia64_rr vrr;
  883. u64 pval;
  884. u64 vhpt_offset;
  885. vpta.val = vcpu_get_pta(vcpu);
  886. vrr.val = vcpu_get_rr(vcpu, vadr);
  887. vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
  888. if (vpta.vf) {
  889. pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
  890. vpta.val, 0, 0, 0, 0);
  891. } else {
  892. pval = (vadr & VRN_MASK) | vhpt_offset |
  893. (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
  894. }
  895. return pval;
  896. }
  897. u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
  898. {
  899. union ia64_rr vrr;
  900. union ia64_pta vpta;
  901. u64 pval;
  902. vpta.val = vcpu_get_pta(vcpu);
  903. vrr.val = vcpu_get_rr(vcpu, vadr);
  904. if (vpta.vf) {
  905. pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
  906. 0, 0, 0, 0, 0);
  907. } else
  908. pval = 1;
  909. return pval;
  910. }
  911. u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
  912. {
  913. struct thash_data *data;
  914. union ia64_pta vpta;
  915. u64 key;
  916. vpta.val = vcpu_get_pta(vcpu);
  917. if (vpta.vf == 0) {
  918. key = 1;
  919. return key;
  920. }
  921. data = vtlb_lookup(vcpu, vadr, D_TLB);
  922. if (!data || !data->p)
  923. key = 1;
  924. else
  925. key = data->key;
  926. return key;
  927. }
  928. void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
  929. {
  930. unsigned long thash, vadr;
  931. vadr = vcpu_get_gr(vcpu, inst.M46.r3);
  932. thash = vcpu_thash(vcpu, vadr);
  933. vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
  934. }
  935. void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
  936. {
  937. unsigned long tag, vadr;
  938. vadr = vcpu_get_gr(vcpu, inst.M46.r3);
  939. tag = vcpu_ttag(vcpu, vadr);
  940. vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
  941. }
  942. int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
  943. {
  944. struct thash_data *data;
  945. union ia64_isr visr, pt_isr;
  946. struct kvm_pt_regs *regs;
  947. struct ia64_psr vpsr;
  948. regs = vcpu_regs(vcpu);
  949. pt_isr.val = VMX(vcpu, cr_isr);
  950. visr.val = 0;
  951. visr.ei = pt_isr.ei;
  952. visr.ir = pt_isr.ir;
  953. vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
  954. visr.na = 1;
  955. data = vhpt_lookup(vadr);
  956. if (data) {
  957. if (data->p == 0) {
  958. vcpu_set_isr(vcpu, visr.val);
  959. data_page_not_present(vcpu, vadr);
  960. return IA64_FAULT;
  961. } else if (data->ma == VA_MATTR_NATPAGE) {
  962. vcpu_set_isr(vcpu, visr.val);
  963. dnat_page_consumption(vcpu, vadr);
  964. return IA64_FAULT;
  965. } else {
  966. *padr = (data->gpaddr >> data->ps << data->ps) |
  967. (vadr & (PSIZE(data->ps) - 1));
  968. return IA64_NO_FAULT;
  969. }
  970. }
  971. data = vtlb_lookup(vcpu, vadr, D_TLB);
  972. if (data) {
  973. if (data->p == 0) {
  974. vcpu_set_isr(vcpu, visr.val);
  975. data_page_not_present(vcpu, vadr);
  976. return IA64_FAULT;
  977. } else if (data->ma == VA_MATTR_NATPAGE) {
  978. vcpu_set_isr(vcpu, visr.val);
  979. dnat_page_consumption(vcpu, vadr);
  980. return IA64_FAULT;
  981. } else{
  982. *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
  983. | (vadr & (PSIZE(data->ps) - 1));
  984. return IA64_NO_FAULT;
  985. }
  986. }
  987. if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
  988. if (vpsr.ic) {
  989. vcpu_set_isr(vcpu, visr.val);
  990. alt_dtlb(vcpu, vadr);
  991. return IA64_FAULT;
  992. } else {
  993. nested_dtlb(vcpu);
  994. return IA64_FAULT;
  995. }
  996. } else {
  997. if (vpsr.ic) {
  998. vcpu_set_isr(vcpu, visr.val);
  999. dvhpt_fault(vcpu, vadr);
  1000. return IA64_FAULT;
  1001. } else{
  1002. nested_dtlb(vcpu);
  1003. return IA64_FAULT;
  1004. }
  1005. }
  1006. return IA64_NO_FAULT;
  1007. }
  1008. int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
  1009. {
  1010. unsigned long r1, r3;
  1011. r3 = vcpu_get_gr(vcpu, inst.M46.r3);
  1012. if (vcpu_tpa(vcpu, r3, &r1))
  1013. return IA64_FAULT;
  1014. vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  1015. return(IA64_NO_FAULT);
  1016. }
  1017. void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
  1018. {
  1019. unsigned long r1, r3;
  1020. r3 = vcpu_get_gr(vcpu, inst.M46.r3);
  1021. r1 = vcpu_tak(vcpu, r3);
  1022. vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
  1023. }
  1024. /************************************
  1025. * Insert/Purge translation register/cache
  1026. ************************************/
  1027. void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
  1028. {
  1029. thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
  1030. }
  1031. void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
  1032. {
  1033. thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
  1034. }
  1035. void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
  1036. {
  1037. u64 ps, va, rid;
  1038. struct thash_data *p_itr;
  1039. ps = itir_ps(itir);
  1040. va = PAGEALIGN(ifa, ps);
  1041. pte &= ~PAGE_FLAGS_RV_MASK;
  1042. rid = vcpu_get_rr(vcpu, ifa);
  1043. rid = rid & RR_RID_MASK;
  1044. p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
  1045. vcpu_set_tr(p_itr, pte, itir, va, rid);
  1046. vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
  1047. }
  1048. void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
  1049. {
  1050. u64 gpfn;
  1051. u64 ps, va, rid;
  1052. struct thash_data *p_dtr;
  1053. ps = itir_ps(itir);
  1054. va = PAGEALIGN(ifa, ps);
  1055. pte &= ~PAGE_FLAGS_RV_MASK;
  1056. if (ps != _PAGE_SIZE_16M)
  1057. thash_purge_entries(vcpu, va, ps);
  1058. gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
  1059. if (__gpfn_is_io(gpfn))
  1060. pte |= VTLB_PTE_IO;
  1061. rid = vcpu_get_rr(vcpu, va);
  1062. rid = rid & RR_RID_MASK;
  1063. p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
  1064. vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
  1065. pte, itir, va, rid);
  1066. vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
  1067. }
  1068. void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
  1069. {
  1070. int index;
  1071. u64 va;
  1072. va = PAGEALIGN(ifa, ps);
  1073. while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
  1074. vcpu->arch.dtrs[index].page_flags = 0;
  1075. thash_purge_entries(vcpu, va, ps);
  1076. }
  1077. void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
  1078. {
  1079. int index;
  1080. u64 va;
  1081. va = PAGEALIGN(ifa, ps);
  1082. while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
  1083. vcpu->arch.itrs[index].page_flags = 0;
  1084. thash_purge_entries(vcpu, va, ps);
  1085. }
  1086. void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
  1087. {
  1088. va = PAGEALIGN(va, ps);
  1089. thash_purge_entries(vcpu, va, ps);
  1090. }
  1091. void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
  1092. {
  1093. thash_purge_all(vcpu);
  1094. }
  1095. void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
  1096. {
  1097. struct exit_ctl_data *p = &vcpu->arch.exit_data;
  1098. long psr;
  1099. local_irq_save(psr);
  1100. p->exit_reason = EXIT_REASON_PTC_G;
  1101. p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
  1102. p->u.ptc_g_data.vaddr = va;
  1103. p->u.ptc_g_data.ps = ps;
  1104. vmm_transition(vcpu);
  1105. /* Do Local Purge Here*/
  1106. vcpu_ptc_l(vcpu, va, ps);
  1107. local_irq_restore(psr);
  1108. }
  1109. void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
  1110. {
  1111. vcpu_ptc_ga(vcpu, va, ps);
  1112. }
  1113. void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
  1114. {
  1115. unsigned long ifa;
  1116. ifa = vcpu_get_gr(vcpu, inst.M45.r3);
  1117. vcpu_ptc_e(vcpu, ifa);
  1118. }
  1119. void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
  1120. {
  1121. unsigned long ifa, itir;
  1122. ifa = vcpu_get_gr(vcpu, inst.M45.r3);
  1123. itir = vcpu_get_gr(vcpu, inst.M45.r2);
  1124. vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
  1125. }
  1126. void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
  1127. {
  1128. unsigned long ifa, itir;
  1129. ifa = vcpu_get_gr(vcpu, inst.M45.r3);
  1130. itir = vcpu_get_gr(vcpu, inst.M45.r2);
  1131. vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
  1132. }
  1133. void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
  1134. {
  1135. unsigned long ifa, itir;
  1136. ifa = vcpu_get_gr(vcpu, inst.M45.r3);
  1137. itir = vcpu_get_gr(vcpu, inst.M45.r2);
  1138. vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
  1139. }
  1140. void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
  1141. {
  1142. unsigned long ifa, itir;
  1143. ifa = vcpu_get_gr(vcpu, inst.M45.r3);
  1144. itir = vcpu_get_gr(vcpu, inst.M45.r2);
  1145. vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
  1146. }
  1147. void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
  1148. {
  1149. unsigned long ifa, itir;
  1150. ifa = vcpu_get_gr(vcpu, inst.M45.r3);
  1151. itir = vcpu_get_gr(vcpu, inst.M45.r2);
  1152. vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
  1153. }
  1154. void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
  1155. {
  1156. unsigned long itir, ifa, pte, slot;
  1157. slot = vcpu_get_gr(vcpu, inst.M45.r3);
  1158. pte = vcpu_get_gr(vcpu, inst.M45.r2);
  1159. itir = vcpu_get_itir(vcpu);
  1160. ifa = vcpu_get_ifa(vcpu);
  1161. vcpu_itr_d(vcpu, slot, pte, itir, ifa);
  1162. }
  1163. void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
  1164. {
  1165. unsigned long itir, ifa, pte, slot;
  1166. slot = vcpu_get_gr(vcpu, inst.M45.r3);
  1167. pte = vcpu_get_gr(vcpu, inst.M45.r2);
  1168. itir = vcpu_get_itir(vcpu);
  1169. ifa = vcpu_get_ifa(vcpu);
  1170. vcpu_itr_i(vcpu, slot, pte, itir, ifa);
  1171. }
  1172. void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
  1173. {
  1174. unsigned long itir, ifa, pte;
  1175. itir = vcpu_get_itir(vcpu);
  1176. ifa = vcpu_get_ifa(vcpu);
  1177. pte = vcpu_get_gr(vcpu, inst.M45.r2);
  1178. vcpu_itc_d(vcpu, pte, itir, ifa);
  1179. }
  1180. void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
  1181. {
  1182. unsigned long itir, ifa, pte;
  1183. itir = vcpu_get_itir(vcpu);
  1184. ifa = vcpu_get_ifa(vcpu);
  1185. pte = vcpu_get_gr(vcpu, inst.M45.r2);
  1186. vcpu_itc_i(vcpu, pte, itir, ifa);
  1187. }
  1188. /*************************************
  1189. * Moves to semi-privileged registers
  1190. *************************************/
  1191. void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
  1192. {
  1193. unsigned long imm;
  1194. if (inst.M30.s)
  1195. imm = -inst.M30.imm;
  1196. else
  1197. imm = inst.M30.imm;
  1198. vcpu_set_itc(vcpu, imm);
  1199. }
  1200. void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
  1201. {
  1202. unsigned long r2;
  1203. r2 = vcpu_get_gr(vcpu, inst.M29.r2);
  1204. vcpu_set_itc(vcpu, r2);
  1205. }
  1206. void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
  1207. {
  1208. unsigned long r1;
  1209. r1 = vcpu_get_itc(vcpu);
  1210. vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
  1211. }
  1212. /**************************************************************************
  1213. struct kvm_vcpu*protection key register access routines
  1214. **************************************************************************/
  1215. unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
  1216. {
  1217. return ((unsigned long)ia64_get_pkr(reg));
  1218. }
  1219. void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
  1220. {
  1221. ia64_set_pkr(reg, val);
  1222. }
  1223. unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
  1224. {
  1225. union ia64_rr rr, rr1;
  1226. rr.val = vcpu_get_rr(vcpu, ifa);
  1227. rr1.val = 0;
  1228. rr1.ps = rr.ps;
  1229. rr1.rid = rr.rid;
  1230. return (rr1.val);
  1231. }
  1232. /********************************
  1233. * Moves to privileged registers
  1234. ********************************/
  1235. unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
  1236. unsigned long val)
  1237. {
  1238. union ia64_rr oldrr, newrr;
  1239. unsigned long rrval;
  1240. struct exit_ctl_data *p = &vcpu->arch.exit_data;
  1241. unsigned long psr;
  1242. oldrr.val = vcpu_get_rr(vcpu, reg);
  1243. newrr.val = val;
  1244. vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
  1245. switch ((unsigned long)(reg >> VRN_SHIFT)) {
  1246. case VRN6:
  1247. vcpu->arch.vmm_rr = vrrtomrr(val);
  1248. local_irq_save(psr);
  1249. p->exit_reason = EXIT_REASON_SWITCH_RR6;
  1250. vmm_transition(vcpu);
  1251. local_irq_restore(psr);
  1252. break;
  1253. case VRN4:
  1254. rrval = vrrtomrr(val);
  1255. vcpu->arch.metaphysical_saved_rr4 = rrval;
  1256. if (!is_physical_mode(vcpu))
  1257. ia64_set_rr(reg, rrval);
  1258. break;
  1259. case VRN0:
  1260. rrval = vrrtomrr(val);
  1261. vcpu->arch.metaphysical_saved_rr0 = rrval;
  1262. if (!is_physical_mode(vcpu))
  1263. ia64_set_rr(reg, rrval);
  1264. break;
  1265. default:
  1266. ia64_set_rr(reg, vrrtomrr(val));
  1267. break;
  1268. }
  1269. return (IA64_NO_FAULT);
  1270. }
  1271. void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
  1272. {
  1273. unsigned long r3, r2;
  1274. r3 = vcpu_get_gr(vcpu, inst.M42.r3);
  1275. r2 = vcpu_get_gr(vcpu, inst.M42.r2);
  1276. vcpu_set_rr(vcpu, r3, r2);
  1277. }
  1278. void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
  1279. {
  1280. }
  1281. void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
  1282. {
  1283. }
  1284. void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
  1285. {
  1286. unsigned long r3, r2;
  1287. r3 = vcpu_get_gr(vcpu, inst.M42.r3);
  1288. r2 = vcpu_get_gr(vcpu, inst.M42.r2);
  1289. vcpu_set_pmc(vcpu, r3, r2);
  1290. }
  1291. void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
  1292. {
  1293. unsigned long r3, r2;
  1294. r3 = vcpu_get_gr(vcpu, inst.M42.r3);
  1295. r2 = vcpu_get_gr(vcpu, inst.M42.r2);
  1296. vcpu_set_pmd(vcpu, r3, r2);
  1297. }
  1298. void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
  1299. {
  1300. u64 r3, r2;
  1301. r3 = vcpu_get_gr(vcpu, inst.M42.r3);
  1302. r2 = vcpu_get_gr(vcpu, inst.M42.r2);
  1303. vcpu_set_pkr(vcpu, r3, r2);
  1304. }
  1305. void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
  1306. {
  1307. unsigned long r3, r1;
  1308. r3 = vcpu_get_gr(vcpu, inst.M43.r3);
  1309. r1 = vcpu_get_rr(vcpu, r3);
  1310. vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
  1311. }
  1312. void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
  1313. {
  1314. unsigned long r3, r1;
  1315. r3 = vcpu_get_gr(vcpu, inst.M43.r3);
  1316. r1 = vcpu_get_pkr(vcpu, r3);
  1317. vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
  1318. }
  1319. void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
  1320. {
  1321. unsigned long r3, r1;
  1322. r3 = vcpu_get_gr(vcpu, inst.M43.r3);
  1323. r1 = vcpu_get_dbr(vcpu, r3);
  1324. vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
  1325. }
  1326. void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
  1327. {
  1328. unsigned long r3, r1;
  1329. r3 = vcpu_get_gr(vcpu, inst.M43.r3);
  1330. r1 = vcpu_get_ibr(vcpu, r3);
  1331. vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
  1332. }
  1333. void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
  1334. {
  1335. unsigned long r3, r1;
  1336. r3 = vcpu_get_gr(vcpu, inst.M43.r3);
  1337. r1 = vcpu_get_pmc(vcpu, r3);
  1338. vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
  1339. }
  1340. unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
  1341. {
  1342. /* FIXME: This could get called as a result of a rsvd-reg fault */
  1343. if (reg > (ia64_get_cpuid(3) & 0xff))
  1344. return 0;
  1345. else
  1346. return ia64_get_cpuid(reg);
  1347. }
  1348. void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
  1349. {
  1350. unsigned long r3, r1;
  1351. r3 = vcpu_get_gr(vcpu, inst.M43.r3);
  1352. r1 = vcpu_get_cpuid(vcpu, r3);
  1353. vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
  1354. }
  1355. void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
  1356. {
  1357. VCPU(vcpu, tpr) = val;
  1358. vcpu->arch.irq_check = 1;
  1359. }
  1360. unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
  1361. {
  1362. unsigned long r2;
  1363. r2 = vcpu_get_gr(vcpu, inst.M32.r2);
  1364. VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
  1365. switch (inst.M32.cr3) {
  1366. case 0:
  1367. vcpu_set_dcr(vcpu, r2);
  1368. break;
  1369. case 1:
  1370. vcpu_set_itm(vcpu, r2);
  1371. break;
  1372. case 66:
  1373. vcpu_set_tpr(vcpu, r2);
  1374. break;
  1375. case 67:
  1376. vcpu_set_eoi(vcpu, r2);
  1377. break;
  1378. default:
  1379. break;
  1380. }
  1381. return 0;
  1382. }
  1383. unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
  1384. {
  1385. unsigned long tgt = inst.M33.r1;
  1386. unsigned long val;
  1387. switch (inst.M33.cr3) {
  1388. case 65:
  1389. val = vcpu_get_ivr(vcpu);
  1390. vcpu_set_gr(vcpu, tgt, val, 0);
  1391. break;
  1392. case 67:
  1393. vcpu_set_gr(vcpu, tgt, 0L, 0);
  1394. break;
  1395. default:
  1396. val = VCPU(vcpu, vcr[inst.M33.cr3]);
  1397. vcpu_set_gr(vcpu, tgt, val, 0);
  1398. break;
  1399. }
  1400. return 0;
  1401. }
  1402. void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
  1403. {
  1404. unsigned long mask;
  1405. struct kvm_pt_regs *regs;
  1406. struct ia64_psr old_psr, new_psr;
  1407. old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
  1408. regs = vcpu_regs(vcpu);
  1409. /* We only support guest as:
  1410. * vpsr.pk = 0
  1411. * vpsr.is = 0
  1412. * Otherwise panic
  1413. */
  1414. if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
  1415. panic_vm(vcpu);
  1416. /*
  1417. * For those IA64_PSR bits: id/da/dd/ss/ed/ia
  1418. * Since these bits will become 0, after success execution of each
  1419. * instruction, we will change set them to mIA64_PSR
  1420. */
  1421. VCPU(vcpu, vpsr) = val
  1422. & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
  1423. IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
  1424. if (!old_psr.i && (val & IA64_PSR_I)) {
  1425. /* vpsr.i 0->1 */
  1426. vcpu->arch.irq_check = 1;
  1427. }
  1428. new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
  1429. /*
  1430. * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr)
  1431. * , except for the following bits:
  1432. * ic/i/dt/si/rt/mc/it/bn/vm
  1433. */
  1434. mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
  1435. IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
  1436. IA64_PSR_VM;
  1437. regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
  1438. check_mm_mode_switch(vcpu, old_psr, new_psr);
  1439. return ;
  1440. }
  1441. unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
  1442. {
  1443. struct ia64_psr vpsr;
  1444. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  1445. vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
  1446. if (!vpsr.ic)
  1447. VCPU(vcpu, ifs) = regs->cr_ifs;
  1448. regs->cr_ifs = IA64_IFS_V;
  1449. return (IA64_NO_FAULT);
  1450. }
  1451. /**************************************************************************
  1452. VCPU banked general register access routines
  1453. **************************************************************************/
  1454. #define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
  1455. do { \
  1456. __asm__ __volatile__ ( \
  1457. ";;extr.u %0 = %3,%6,16;;\n" \
  1458. "dep %1 = %0, %1, 0, 16;;\n" \
  1459. "st8 [%4] = %1\n" \
  1460. "extr.u %0 = %2, 16, 16;;\n" \
  1461. "dep %3 = %0, %3, %6, 16;;\n" \
  1462. "st8 [%5] = %3\n" \
  1463. ::"r"(i), "r"(*b1unat), "r"(*b0unat), \
  1464. "r"(*runat), "r"(b1unat), "r"(runat), \
  1465. "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
  1466. } while (0)
  1467. void vcpu_bsw0(struct kvm_vcpu *vcpu)
  1468. {
  1469. unsigned long i;
  1470. struct kvm_pt_regs *regs = vcpu_regs(vcpu);
  1471. unsigned long *r = &regs->r16;
  1472. unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
  1473. unsigned long *b1 = &VCPU(vcpu, vgr[0]);
  1474. unsigned long *runat = &regs->eml_unat;
  1475. unsigned long *b0unat = &VCPU(vcpu, vbnat);
  1476. unsigned long *b1unat = &VCPU(vcpu, vnat);
  1477. if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
  1478. for (i = 0; i < 16; i++) {
  1479. *b1++ = *r;
  1480. *r++ = *b0++;
  1481. }
  1482. vcpu_bsw0_unat(i, b0unat, b1unat, runat,
  1483. VMM_PT_REGS_R16_SLOT);
  1484. VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
  1485. }
  1486. }
  1487. #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
  1488. do { \
  1489. __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
  1490. "dep %1 = %0, %1, 16, 16;;\n" \
  1491. "st8 [%4] = %1\n" \
  1492. "extr.u %0 = %2, 0, 16;;\n" \
  1493. "dep %3 = %0, %3, %6, 16;;\n" \
  1494. "st8 [%5] = %3\n" \
  1495. ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
  1496. "r"(*runat), "r"(b0unat), "r"(runat), \
  1497. "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
  1498. } while (0)
void vcpu_bsw1(struct kvm_vcpu *vcpu)
{
	unsigned long i;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
		for (i = 0; i < 16; i++) {
			*b0++ = *r;
			*r++ = *b1++;
		}
		vcpu_bsw1_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) |= IA64_PSR_BN;
	}
}
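/*
 * Emulate "rfi": select the register bank requested by the saved IPSR,
 * restore the guest PSR from it, reload the frame marker from the
 * virtual IFS if its valid bit is set, and resume execution at IIP.
 */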
void vcpu_rfi(struct kvm_vcpu *vcpu)
{
	unsigned long ifs, psr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	psr = VCPU(vcpu, ipsr);
	if (psr & IA64_PSR_BN)
		vcpu_bsw1(vcpu);
	else
		vcpu_bsw0(vcpu);
	vcpu_set_psr(vcpu, psr);
	ifs = VCPU(vcpu, ifs);
	if (ifs >> 63)
		regs->cr_ifs = ifs;
	regs->cr_iip = VCPU(vcpu, iip);
}
/*
 * The VPSR cannot track the following guest PSR bits
 * (be/up/ac/mfl/mfh/cpl/ri), so they are read back from the machine PSR.
 * This function returns the complete guest PSR.
 */
unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
{
	unsigned long mask;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
		IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
	return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}
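/*
 * rsm/ssm clear or set bits in the guest PSR.  The 24-bit immediate is
 * reassembled from the i, i2 and imm fields of the M44 instruction
 * encoding before it is applied.
 */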
void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vpsr;
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
					| inst.M44.imm;

	vpsr = vcpu_get_psr(vcpu);
	vpsr &= (~imm24);
	vcpu_set_psr(vcpu, vpsr);
}

void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vpsr;
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
					| inst.M44.imm;

	vpsr = vcpu_get_psr(vcpu);
	vpsr |= imm24;
	vcpu_set_psr(vcpu, vpsr);
}
/* Generate Mask
 * Parameter:
 *	bit -- starting bit
 *	len -- how many bits
 */
#define MASK(bit,len)					\
({							\
	__u64 ret;					\
							\
	__asm__ __volatile__ ("dep %0=-1, r0, %1, %2"	\
			: "=r" (ret) :			\
			"M" (bit),			\
			"M" (len));			\
	ret;						\
})
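/*
 * mov-to-psr only writes PSR.l: the lower 32 bits of the guest PSR are
 * replaced with the source value while the upper 32 bits are preserved.
 */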
void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
{
	val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
	vcpu_set_psr(vcpu, val);
}

void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_gr(vcpu, inst.M35.r2);
	vcpu_set_psr_l(vcpu, val);
}

void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_psr(vcpu);
	val = (val & MASK(0, 32)) | (val & MASK(35, 2));
	vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
}
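/*
 * Advance or rewind the guest IP by one instruction slot.  IA-64 bundles
 * are 16 bytes and contain three slots; ipsr.ri selects the slot, so
 * stepping past slot 2 moves to the next bundle (and vice versa).
 */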
void vcpu_increment_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	if (ipsr->ri == 2) {
		ipsr->ri = 0;
		regs->cr_iip += 16;
	} else
		ipsr->ri++;
}

void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	if (ipsr->ri == 0) {
		ipsr->ri = 2;
		regs->cr_iip -= 16;
	} else
		ipsr->ri--;
}
/** Emulate a privileged operation.
 *
 * @param vcpu	virtual cpu
 * @param regs	guest registers at the time of the fault
 *
 * The cause of the virtualization fault and the faulting opcode are
 * taken from VMX(vcpu, cause) and VMX(vcpu, opcode).
 */
void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
{
	unsigned long status, cause, opcode;
	INST64 inst;

	status = IA64_NO_FAULT;
	cause = VMX(vcpu, cause);
	opcode = VMX(vcpu, opcode);
	inst.inst = opcode;
	/*
	 * Switch to actual virtual rid in rr0 and rr4,
	 * which is required by some TLB-related instructions.
	 */
	prepare_if_physical_mode(vcpu);

	switch (cause) {
	case EVENT_RSM:
		kvm_rsm(vcpu, inst);
		break;
	case EVENT_SSM:
		kvm_ssm(vcpu, inst);
		break;
	case EVENT_MOV_TO_PSR:
		kvm_mov_to_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PSR:
		kvm_mov_from_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CR:
		kvm_mov_from_cr(vcpu, inst);
		break;
	case EVENT_MOV_TO_CR:
		kvm_mov_to_cr(vcpu, inst);
		break;
	case EVENT_BSW_0:
		vcpu_bsw0(vcpu);
		break;
	case EVENT_BSW_1:
		vcpu_bsw1(vcpu);
		break;
	case EVENT_COVER:
		vcpu_cover(vcpu);
		break;
	case EVENT_RFI:
		vcpu_rfi(vcpu);
		break;
	case EVENT_ITR_D:
		kvm_itr_d(vcpu, inst);
		break;
	case EVENT_ITR_I:
		kvm_itr_i(vcpu, inst);
		break;
	case EVENT_PTR_D:
		kvm_ptr_d(vcpu, inst);
		break;
	case EVENT_PTR_I:
		kvm_ptr_i(vcpu, inst);
		break;
	case EVENT_ITC_D:
		kvm_itc_d(vcpu, inst);
		break;
	case EVENT_ITC_I:
		kvm_itc_i(vcpu, inst);
		break;
	case EVENT_PTC_L:
		kvm_ptc_l(vcpu, inst);
		break;
	case EVENT_PTC_G:
		kvm_ptc_g(vcpu, inst);
		break;
	case EVENT_PTC_GA:
		kvm_ptc_ga(vcpu, inst);
		break;
	case EVENT_PTC_E:
		kvm_ptc_e(vcpu, inst);
		break;
	case EVENT_MOV_TO_RR:
		kvm_mov_to_rr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_RR:
		kvm_mov_from_rr(vcpu, inst);
		break;
	case EVENT_THASH:
		kvm_thash(vcpu, inst);
		break;
	case EVENT_TTAG:
		kvm_ttag(vcpu, inst);
		break;
	case EVENT_TPA:
		status = kvm_tpa(vcpu, inst);
		break;
	case EVENT_TAK:
		kvm_tak(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR_IMM:
		kvm_mov_to_ar_imm(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR:
		kvm_mov_to_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_FROM_AR:
		kvm_mov_from_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_TO_DBR:
		kvm_mov_to_dbr(vcpu, inst);
		break;
	case EVENT_MOV_TO_IBR:
		kvm_mov_to_ibr(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMC:
		kvm_mov_to_pmc(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMD:
		kvm_mov_to_pmd(vcpu, inst);
		break;
	case EVENT_MOV_TO_PKR:
		kvm_mov_to_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_DBR:
		kvm_mov_from_dbr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_IBR:
		kvm_mov_from_ibr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PMC:
		kvm_mov_from_pmc(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PKR:
		kvm_mov_from_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CPUID:
		kvm_mov_from_cpuid(vcpu, inst);
		break;
	case EVENT_VMSW:
		status = IA64_FAULT;
		break;
	default:
		break;
	}
	/*
	 * Only a few handlers report a status; unless one of them signalled
	 * a fault (or the instruction was rfi, which rewrites IIP itself),
	 * step the guest IP past the emulated instruction.
	 */
	if (status == IA64_NO_FAULT && cause != EVENT_RFI)
		vcpu_increment_iip(vcpu);

	recover_if_physical_mode(vcpu);
}
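/*
 * Put the virtual cpu into its architectural reset state: start in
 * metaphysical (guest physical) mode with register bank 1 selected,
 * default region registers, and all interrupt vector registers masked
 * (bit 16 of itv/pmv/cmcv/lrr0/lrr1 is the mask bit).
 */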
void init_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	vcpu->arch.mode_flags = GUEST_IN_PHY;
	VMX(vcpu, vrr[0]) = 0x38;
	VMX(vcpu, vrr[1]) = 0x38;
	VMX(vcpu, vrr[2]) = 0x38;
	VMX(vcpu, vrr[3]) = 0x38;
	VMX(vcpu, vrr[4]) = 0x38;
	VMX(vcpu, vrr[5]) = 0x38;
	VMX(vcpu, vrr[6]) = 0x38;
	VMX(vcpu, vrr[7]) = 0x38;
	VCPU(vcpu, vpsr) = IA64_PSR_BN;
	VCPU(vcpu, dcr) = 0;
	/* pta.size must not be 0.  The minimum is 15 (32k) */
	VCPU(vcpu, pta) = 15 << 2;
	VCPU(vcpu, itv) = 0x10000;
	VCPU(vcpu, itm) = 0;
	VMX(vcpu, last_itc) = 0;

	VCPU(vcpu, lid) = VCPU_LID(vcpu);
	VCPU(vcpu, ivr) = 0;
	VCPU(vcpu, tpr) = 0x10000;
	VCPU(vcpu, eoi) = 0;
	VCPU(vcpu, irr[0]) = 0;
	VCPU(vcpu, irr[1]) = 0;
	VCPU(vcpu, irr[2]) = 0;
	VCPU(vcpu, irr[3]) = 0;
	VCPU(vcpu, pmv) = 0x10000;
	VCPU(vcpu, cmcv) = 0x10000;
	VCPU(vcpu, lrr0) = 0x10000;	/* default reset value? */
	VCPU(vcpu, lrr1) = 0x10000;	/* default reset value? */
	update_vhpi(vcpu, NULL_VECTOR);
	VLSAPIC_XTP(vcpu) = 0x80;	/* disabled */

	for (i = 0; i < 4; i++)
		VLSAPIC_INSVC(vcpu, i) = 0;
}
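/*
 * Program the machine region registers from the guest's virtual rr
 * values (rr0/rr4 get the metaphysical mappings while the guest is in
 * physical mode), with interrupts disabled around the update.
 */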
void kvm_init_all_rr(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	local_irq_save(psr);

	/* WARNING: virtual mode and physical mode must not coexist
	 * in the same region.
	 */
	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));

	if (is_physical_mode(vcpu)) {
		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
			panic_vm(vcpu);
		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
		ia64_dv_serialize_data();
	} else {
		ia64_set_rr((VRN0 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr4);
		ia64_dv_serialize_data();
	}
	ia64_set_rr((VRN1 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN7 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN7])));
	ia64_dv_serialize_data();
	ia64_srlz_d();
	ia64_set_psr(psr);
}
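/*
 * Per-vcpu entry point of the VMM module: restore the VPD through
 * PAL_VPS_RESTORE, set up the software VTLB and VHPT, initialize the
 * vcpu's architectural state and region registers, then branch to the
 * reset entry.
 */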
int vmm_entry(void)
{
	struct kvm_vcpu *v;
	v = current_vcpu;

	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
						0, 0, 0, 0, 0, 0);
	kvm_init_vtlb(v);
	kvm_init_vhpt(v);
	init_vcpu(v);
	kvm_init_all_rr(v);
	vmm_reset_entry();

	return 0;
}
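/*
 * Report an unrecoverable VMM/guest condition to the host side: record
 * EXIT_REASON_VM_PANIC in the exit data and transition out of the VMM.
 */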
void panic_vm(struct kvm_vcpu *v)
{
	struct exit_ctl_data *p = &v->arch.exit_data;

	p->exit_reason = EXIT_REASON_VM_PANIC;
	vmm_transition(v);
	/* Never returns */
	while (1)
		;
}