/*
 * arch/ia64/kvm/optvfault.S
 * optimize virtualization fault handler
 *
 * Copyright (C) 2006 Intel Co
 *      Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 * Copyright (C) 2008 Intel Co
 *      Add the support for Tukwila processors.
 *      Xiantao Zhang <xiantao.zhang@intel.com>
 */

#include <asm/asmmacro.h>
#include <asm/processor.h>

#include "vti.h"
#include "asm-offsets.h"

#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_RSM
#define ACCE_SSM
#define ACCE_MOV_TO_PSR
#define ACCE_THASH
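
/*
 * VMX_VPS_SYNC_READ: sync the guest VPD from the processor through the
 * PAL_VPS_SYNC_READ service (kvm_vps_sync_read).  b0, r24, r25 and r31
 * are saved in r17-r20 around the call and restored afterwards; r16/r25
 * end up holding the VPD base.
 */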
#define VMX_VPS_SYNC_READ                       \
        add r16=VMM_VPD_BASE_OFFSET,r21;        \
        mov r17 = b0;                           \
        mov r18 = r24;                          \
        mov r19 = r25;                          \
        mov r20 = r31;                          \
        ;;                                      \
{.mii;                                          \
        ld8 r16 = [r16];                        \
        nop 0x0;                                \
        mov r24 = ip;                           \
        ;;                                      \
};                                              \
{.mmb;                                          \
        add r24=0x20, r24;                      \
        mov r25 = r16;                          \
        br.sptk.many kvm_vps_sync_read;         \
};                                              \
        mov b0 = r17;                           \
        mov r24 = r18;                          \
        mov r25 = r19;                          \
        mov r31 = r20
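
/*
 * kvm_vps_entry: branch to the PAL VPS service at offset r30 from the
 * per-vcpu VSA base (VMM_VCPU_VSA_BASE_OFFSET from r21).  r29 and b0
 * are clobbered.
 */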
ENTRY(kvm_vps_entry)
        adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
        ;;
        ld8 r29 = [r29]
        ;;
        add r29 = r29, r30
        ;;
        mov b0 = r29
        br.sptk.many b0
END(kvm_vps_entry)

/*
 * Inputs:
 *      r24 : return address
 *      r25 : vpd
 *      r29 : scratch
 *
 */
GLOBAL_ENTRY(kvm_vps_sync_read)
        movl r30 = PAL_VPS_SYNC_READ
        ;;
        br.sptk.many kvm_vps_entry
END(kvm_vps_sync_read)

/*
 * Inputs:
 *      r24 : return address
 *      r25 : vpd
 *      r29 : scratch
 *
 */
GLOBAL_ENTRY(kvm_vps_sync_write)
        movl r30 = PAL_VPS_SYNC_WRITE
        ;;
        br.sptk.many kvm_vps_entry
END(kvm_vps_sync_write)

/*
 * Inputs:
 *      r23 : pr
 *      r24 : guest b0
 *      r25 : vpd
 *
 */
GLOBAL_ENTRY(kvm_vps_resume_normal)
        movl r30 = PAL_VPS_RESUME_NORMAL
        ;;
        mov pr=r23,-2
        br.sptk.many kvm_vps_entry
END(kvm_vps_resume_normal)

/*
 * Inputs:
 *      r23 : pr
 *      r24 : guest b0
 *      r25 : vpd
 *      r17 : isr
 */
GLOBAL_ENTRY(kvm_vps_resume_handler)
        movl r30 = PAL_VPS_RESUME_HANDLER
        ;;
        ld8 r26=[r25]
        shr r17=r17,IA64_ISR_IR_BIT
        ;;
        dep r26=r17,r26,63,1    // bit 63 of r26 indicates whether CFLE is enabled
        mov pr=r23,-2
        br.sptk.many kvm_vps_entry
END(kvm_vps_resume_handler)
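
/*
 * Fast path for reading ar.itc from the guest: adds the vcpu's itc
 * offset to the physical ar.itc, records the result as last_itc, and
 * writes it to the target register via the asm_mov_to_reg table.
 */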
//mov r1=ar3
GLOBAL_ENTRY(kvm_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
        br.many kvm_virtualization_fault_back
#endif
        add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
        add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
        extr.u r17=r25,6,7
        ;;
        ld8 r18=[r18]
        mov r19=ar.itc
        mov r24=b0
        ;;
        add r19=r19,r18
        addl r20=@gprel(asm_mov_to_reg),gp
        ;;
        st8 [r16] = r19
        adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
        shladd r17=r17,4,r20
        ;;
        mov b0=r17
        br.sptk.few b0
        ;;
END(kvm_asm_mov_from_ar)

// mov r1=rr[r3]
GLOBAL_ENTRY(kvm_asm_mov_from_rr)
#ifndef ACCE_MOV_FROM_RR
        br.many kvm_virtualization_fault_back
#endif
        extr.u r16=r25,20,7
        extr.u r17=r25,6,7
        addl r20=@gprel(asm_mov_from_reg),gp
        ;;
        adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
        shladd r16=r16,4,r20
        mov r24=b0
        ;;
        add r27=VMM_VCPU_VRR0_OFFSET,r21
        mov b0=r16
        br.many b0
        ;;
kvm_asm_mov_from_rr_back_1:
        adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
        adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
        shr.u r26=r19,61
        ;;
        shladd r17=r17,4,r22
        shladd r27=r26,3,r27
        ;;
        ld8 r19=[r27]
        mov b0=r17
        br.many b0
END(kvm_asm_mov_from_rr)

// mov rr[r3]=r2
GLOBAL_ENTRY(kvm_asm_mov_to_rr)
#ifndef ACCE_MOV_TO_RR
        br.many kvm_virtualization_fault_back
#endif
        extr.u r16=r25,20,7
        extr.u r17=r25,13,7
        addl r20=@gprel(asm_mov_from_reg),gp
        ;;
        adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
        shladd r16=r16,4,r20
        mov r22=b0
        ;;
        add r27=VMM_VCPU_VRR0_OFFSET,r21
        mov b0=r16
        br.many b0
        ;;
kvm_asm_mov_to_rr_back_1:
        adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
        shr.u r23=r19,61
        shladd r17=r17,4,r20
        ;;
        //if rr6, go back
        cmp.eq p6,p0=6,r23
        mov b0=r22
(p6)    br.cond.dpnt.many kvm_virtualization_fault_back
        ;;
        mov r28=r19
        mov b0=r17
        br.many b0
kvm_asm_mov_to_rr_back_2:
        adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
        shladd r27=r23,3,r27
        ;;      // vrr.rid<<4 |0xe
        st8 [r27]=r19
        mov b0=r30
        ;;
        extr.u r16=r19,8,26
        extr.u r18=r19,2,6
        mov r17=0xe
        ;;
        shladd r16=r16,4,r17
        extr.u r19=r19,0,8
        ;;
        shl r16=r16,8
        ;;
        add r19=r19,r16
        ;;      //set ve 1
        dep r19=-1,r19,0,1
        cmp.lt p6,p0=14,r18
        ;;
(p6)    mov r18=14
        ;;
(p6)    dep r19=r18,r19,2,6
        ;;
        cmp.eq p6,p0=0,r23
        ;;
        cmp.eq.or p6,p0=4,r23
        ;;
        adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
(p6)    adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
        ;;
        ld4 r16=[r16]
        cmp.eq p7,p0=r0,r0
(p6)    shladd r17=r23,1,r17
        ;;
(p6)    st8 [r17]=r19
(p6)    tbit.nz p6,p7=r16,0
        ;;
(p7)    mov rr[r28]=r19
        mov r24=r22
        br.many b0
END(kvm_asm_mov_to_rr)

//rsm
GLOBAL_ENTRY(kvm_asm_rsm)
#ifndef ACCE_RSM
        br.many kvm_virtualization_fault_back
#endif
        VMX_VPS_SYNC_READ
        ;;
        extr.u r26=r25,6,21
        extr.u r27=r25,31,2
        ;;
        extr.u r28=r25,36,1
        dep r26=r27,r26,21,2
        ;;
        add r17=VPD_VPSR_START_OFFSET,r16
        add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
        //r26 is imm24
        dep r26=r28,r26,23,1
        ;;
        ld8 r18=[r17]
        movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
        ld4 r23=[r22]
        sub r27=-1,r26
        mov r24=b0
        ;;
        mov r20=cr.ipsr
        or r28=r27,r28
        and r19=r18,r27
        ;;
        st8 [r17]=r19
        and r20=r20,r28
        /* Commented out due to lack of fp lazy algorithm support
        adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
        ;;
        ld8 r27=[r27]
        ;;
        tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
        ;;
        (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
        */
        ;;
        mov cr.ipsr=r20
        tbit.nz p6,p0=r23,0
        ;;
        tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
(p6)    br.dptk kvm_resume_to_guest_with_sync
        ;;
        add r26=VMM_VCPU_META_RR0_OFFSET,r21
        add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
        dep r23=-1,r23,0,1
        ;;
        ld8 r26=[r26]
        ld8 r27=[r27]
        st4 [r22]=r23
        dep.z r28=4,61,3
        ;;
        mov rr[r0]=r26
        ;;
        mov rr[r28]=r27
        ;;
        srlz.d
        br.many kvm_resume_to_guest_with_sync
END(kvm_asm_rsm)

//ssm
GLOBAL_ENTRY(kvm_asm_ssm)
#ifndef ACCE_SSM
        br.many kvm_virtualization_fault_back
#endif
        VMX_VPS_SYNC_READ
        ;;
        extr.u r26=r25,6,21
        extr.u r27=r25,31,2
        ;;
        extr.u r28=r25,36,1
        dep r26=r27,r26,21,2
        ;;      //r26 is imm24
        add r27=VPD_VPSR_START_OFFSET,r16
        dep r26=r28,r26,23,1
        ;;      //r19 vpsr
        ld8 r29=[r27]
        mov r24=b0
        ;;
        add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
        mov r20=cr.ipsr
        or r19=r29,r26
        ;;
        ld4 r23=[r22]
        st8 [r27]=r19
        or r20=r20,r26
        ;;
        mov cr.ipsr=r20
        movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
        ;;
        and r19=r28,r19
        tbit.z p6,p0=r23,0
        ;;
        cmp.ne.or p6,p0=r28,r19
(p6)    br.dptk kvm_asm_ssm_1
        ;;
        add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
        add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
        dep r23=0,r23,0,1
        ;;
        ld8 r26=[r26]
        ld8 r27=[r27]
        st4 [r22]=r23
        dep.z r28=4,61,3
        ;;
        mov rr[r0]=r26
        ;;
        mov rr[r28]=r27
        ;;
        srlz.d
        ;;
kvm_asm_ssm_1:
        tbit.nz p6,p0=r29,IA64_PSR_I_BIT
        ;;
        tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
(p6)    br.dptk kvm_resume_to_guest_with_sync
        ;;
        add r29=VPD_VTPR_START_OFFSET,r16
        add r30=VPD_VHPI_START_OFFSET,r16
        ;;
        ld8 r29=[r29]
        ld8 r30=[r30]
        ;;
        extr.u r17=r29,4,4
        extr.u r18=r29,16,1
        ;;
        dep r17=r18,r17,4,1
        ;;
        cmp.gt p6,p0=r30,r17
(p6)    br.dpnt.few kvm_asm_dispatch_vexirq
        br.many kvm_resume_to_guest_with_sync
END(kvm_asm_ssm)

//mov psr.l=r2
GLOBAL_ENTRY(kvm_asm_mov_to_psr)
#ifndef ACCE_MOV_TO_PSR
        br.many kvm_virtualization_fault_back
#endif
        VMX_VPS_SYNC_READ
        ;;
        extr.u r26=r25,13,7     //r2
        addl r20=@gprel(asm_mov_from_reg),gp
        ;;
        adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
        shladd r26=r26,4,r20
        mov r24=b0
        ;;
        add r27=VPD_VPSR_START_OFFSET,r16
        mov b0=r26
        br.many b0
        ;;
kvm_asm_mov_to_psr_back:
        ld8 r17=[r27]
        add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
        dep r19=0,r19,32,32
        ;;
        ld4 r23=[r22]
        dep r18=0,r17,0,32
        ;;
        add r30=r18,r19
        movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
        ;;
        st8 [r27]=r30
        and r27=r28,r30
        and r29=r28,r17
        ;;
        cmp.eq p5,p0=r29,r27
        cmp.eq p6,p7=r28,r27
(p5)    br.many kvm_asm_mov_to_psr_1
        ;;
        //virtual to physical
(p7)    add r26=VMM_VCPU_META_RR0_OFFSET,r21
(p7)    add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
(p7)    dep r23=-1,r23,0,1
        ;;
        //physical to virtual
(p6)    add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
(p6)    add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
(p6)    dep r23=0,r23,0,1
        ;;
        ld8 r26=[r26]
        ld8 r27=[r27]
        st4 [r22]=r23
        dep.z r28=4,61,3
        ;;
        mov rr[r0]=r26
        ;;
        mov rr[r28]=r27
        ;;
        srlz.d
        ;;
kvm_asm_mov_to_psr_1:
        mov r20=cr.ipsr
        movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
        ;;
        or r19=r19,r28
        dep r20=0,r20,0,32
        ;;
        add r20=r19,r20
        mov b0=r24
        ;;
        /* Commented out due to lack of fp lazy algorithm support
        adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
        ;;
        ld8 r27=[r27]
        ;;
        tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
        ;;
        (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
        ;;
        */
        mov cr.ipsr=r20
        cmp.ne p6,p0=r0,r0
        ;;
        tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
        tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
(p6)    br.dpnt.few kvm_resume_to_guest_with_sync
        ;;
        add r29=VPD_VTPR_START_OFFSET,r16
        add r30=VPD_VHPI_START_OFFSET,r16
        ;;
        ld8 r29=[r29]
        ld8 r30=[r30]
        ;;
        extr.u r17=r29,4,4
        extr.u r18=r29,16,1
        ;;
        dep r17=r18,r17,4,1
        ;;
        cmp.gt p6,p0=r30,r17
(p6)    br.dpnt.few kvm_asm_dispatch_vexirq
        br.many kvm_resume_to_guest_with_sync
END(kvm_asm_mov_to_psr)
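
/*
 * kvm_asm_dispatch_vexirq: write the VPD back via kvm_vps_sync_write,
 * advance the guest ipsr.ri/iip past the faulting instruction, then
 * branch to kvm_dispatch_vexirq.
 */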
ENTRY(kvm_asm_dispatch_vexirq)
        //increment iip
        mov r17 = b0
        mov r18 = r31
{.mii
        add r25=VMM_VPD_BASE_OFFSET,r21
        nop 0x0
        mov r24 = ip
        ;;
}
{.mmb
        add r24 = 0x20, r24
        ld8 r25 = [r25]
        br.sptk.many kvm_vps_sync_write
}
        mov b0 = r17
        mov r16 = cr.ipsr
        mov r31 = r18
        mov r19 = 37
        ;;
        extr.u r17=r16,IA64_PSR_RI_BIT,2
        tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
        ;;
(p6)    mov r18=cr.iip
(p6)    mov r17=r0
(p7)    add r17=1,r17
        ;;
(p6)    add r18=0x10,r18
        dep r16=r17,r16,IA64_PSR_RI_BIT,2
        ;;
(p6)    mov cr.iip=r18
        mov cr.ipsr=r16
        mov r30 = 1
        br.many kvm_dispatch_vexirq
END(kvm_asm_dispatch_vexirq)

// thash
// TODO: add support when pta.vf = 1
GLOBAL_ENTRY(kvm_asm_thash)
#ifndef ACCE_THASH
        br.many kvm_virtualization_fault_back
#endif
        extr.u r17=r25,20,7             // get r3 from opcode in r25
        extr.u r18=r25,6,7              // get r1 from opcode in r25
        addl r20=@gprel(asm_mov_from_reg),gp
        ;;
        adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
        shladd r17=r17,4,r20            // get addr of MOVE_FROM_REG(r17)
        adds r16=VMM_VPD_BASE_OFFSET,r21        // get vcpu.arch.priveregs
        ;;
        mov r24=b0
        ;;
        ld8 r16=[r16]                   // get VPD addr
        mov b0=r17
        br.many b0                      // r19 return value
        ;;
kvm_asm_thash_back1:
        shr.u r23=r19,61                // get RR number
        adds r28=VMM_VCPU_VRR0_OFFSET,r21       // get vcpu->arch.vrr[0]'s addr
        adds r16=VMM_VPD_VPTA_OFFSET,r16        // get vpta
        ;;
        shladd r27=r23,3,r28            // get vcpu->arch.vrr[r23]'s addr
        ld8 r17=[r16]                   // get PTA
        mov r26=1
        ;;
        extr.u r29=r17,2,6              // get pta.size
        ld8 r28=[r27]                   // get vcpu->arch.vrr[r23]'s value
        ;;
        mov b0=r24
        //Fallback to C if pta.vf is set
        tbit.nz p6,p0=r17,8
        ;;
(p6)    mov r24=EVENT_THASH
(p6)    br.cond.dpnt.many kvm_virtualization_fault_back
        extr.u r28=r28,2,6              // get rr.ps
        shl r22=r26,r29                 // 1UL << pta.size
        ;;
        shr.u r23=r19,r28               // vaddr >> rr.ps
        adds r26=3,r29                  // pta.size + 3
        shl r27=r17,3                   // pta << 3
        ;;
        shl r23=r23,3                   // (vaddr >> rr.ps) << 3
        shr.u r27=r27,r26               // (pta << 3) >> (pta.size+3)
        movl r16=7<<61
        ;;
        adds r22=-1,r22                 // (1UL << pta.size) - 1
        shl r27=r27,r29                 // ((pta<<3)>>(pta.size+3))<<pta.size
        and r19=r19,r16                 // vaddr & VRN_MASK
        ;;
        and r22=r22,r23                 // vhpt_offset
        or r19=r19,r27                  // (vaddr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size)
        adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
        ;;
        or r19=r19,r22                  // calc pval
        shladd r17=r18,4,r26
        adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
        ;;
        mov b0=r17
        br.many b0
END(kvm_asm_thash)

#define MOV_TO_REG0             \
{;                              \
        nop.b 0x0;              \
        nop.b 0x0;              \
        nop.b 0x0;              \
        ;;                      \
};

#define MOV_TO_REG(n)           \
{;                              \
        mov r##n##=r19;         \
        mov b0=r30;             \
        br.sptk.many b0;        \
        ;;                      \
};

#define MOV_FROM_REG(n)         \
{;                              \
        mov r19=r##n##;         \
        mov b0=r30;             \
        br.sptk.many b0;        \
        ;;                      \
};
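
/*
 * MOV_TO_BANK0_REG/MOV_FROM_BANK0_REG reach r16-r31 of the other
 * register bank by switching banks with bsw.1 and back with bsw.0,
 * using r2 and r26 as scratch.  The value travels in r19 and control
 * returns through b0=r30, just as in MOV_TO_REG/MOV_FROM_REG.
 */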
#define MOV_TO_BANK0_REG(n)                     \
ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##);     \
{;                                              \
        mov r26=r2;                             \
        mov r2=r19;                             \
        bsw.1;                                  \
        ;;                                      \
};                                              \
{;                                              \
        mov r##n##=r2;                          \
        nop.b 0x0;                              \
        bsw.0;                                  \
        ;;                                      \
};                                              \
{;                                              \
        mov r2=r26;                             \
        mov b0=r30;                             \
        br.sptk.many b0;                        \
        ;;                                      \
};                                              \
END(asm_mov_to_bank0_reg##n##)

#define MOV_FROM_BANK0_REG(n)                   \
ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##);   \
{;                                              \
        mov r26=r2;                             \
        nop.b 0x0;                              \
        bsw.1;                                  \
        ;;                                      \
};                                              \
{;                                              \
        mov r2=r##n##;                          \
        nop.b 0x0;                              \
        bsw.0;                                  \
        ;;                                      \
};                                              \
{;                                              \
        mov r19=r2;                             \
        mov r2=r26;                             \
        mov b0=r30;                             \
};                                              \
{;                                              \
        nop.b 0x0;                              \
        nop.b 0x0;                              \
        br.sptk.many b0;                        \
        ;;                                      \
};                                              \
END(asm_mov_from_bank0_reg##n##)

#define JMP_TO_MOV_TO_BANK0_REG(n)              \
{;                                              \
        nop.b 0x0;                              \
        nop.b 0x0;                              \
        br.sptk.many asm_mov_to_bank0_reg##n##; \
        ;;                                      \
}

#define JMP_TO_MOV_FROM_BANK0_REG(n)            \
{;                                              \
        nop.b 0x0;                              \
        nop.b 0x0;                              \
        br.sptk.many asm_mov_from_bank0_reg##n##;       \
        ;;                                      \
}

MOV_FROM_BANK0_REG(16)
MOV_FROM_BANK0_REG(17)
MOV_FROM_BANK0_REG(18)
MOV_FROM_BANK0_REG(19)
MOV_FROM_BANK0_REG(20)
MOV_FROM_BANK0_REG(21)
MOV_FROM_BANK0_REG(22)
MOV_FROM_BANK0_REG(23)
MOV_FROM_BANK0_REG(24)
MOV_FROM_BANK0_REG(25)
MOV_FROM_BANK0_REG(26)
MOV_FROM_BANK0_REG(27)
MOV_FROM_BANK0_REG(28)
MOV_FROM_BANK0_REG(29)
MOV_FROM_BANK0_REG(30)
MOV_FROM_BANK0_REG(31)

// mov from reg table
ENTRY(asm_mov_from_reg)
        MOV_FROM_REG(0)
        MOV_FROM_REG(1)
        MOV_FROM_REG(2)
        MOV_FROM_REG(3)
        MOV_FROM_REG(4)
        MOV_FROM_REG(5)
        MOV_FROM_REG(6)
        MOV_FROM_REG(7)
        MOV_FROM_REG(8)
        MOV_FROM_REG(9)
        MOV_FROM_REG(10)
        MOV_FROM_REG(11)
        MOV_FROM_REG(12)
        MOV_FROM_REG(13)
        MOV_FROM_REG(14)
        MOV_FROM_REG(15)
        JMP_TO_MOV_FROM_BANK0_REG(16)
        JMP_TO_MOV_FROM_BANK0_REG(17)
        JMP_TO_MOV_FROM_BANK0_REG(18)
        JMP_TO_MOV_FROM_BANK0_REG(19)
        JMP_TO_MOV_FROM_BANK0_REG(20)
        JMP_TO_MOV_FROM_BANK0_REG(21)
        JMP_TO_MOV_FROM_BANK0_REG(22)
        JMP_TO_MOV_FROM_BANK0_REG(23)
        JMP_TO_MOV_FROM_BANK0_REG(24)
        JMP_TO_MOV_FROM_BANK0_REG(25)
        JMP_TO_MOV_FROM_BANK0_REG(26)
        JMP_TO_MOV_FROM_BANK0_REG(27)
        JMP_TO_MOV_FROM_BANK0_REG(28)
        JMP_TO_MOV_FROM_BANK0_REG(29)
        JMP_TO_MOV_FROM_BANK0_REG(30)
        JMP_TO_MOV_FROM_BANK0_REG(31)
        MOV_FROM_REG(32)
        MOV_FROM_REG(33)
        MOV_FROM_REG(34)
        MOV_FROM_REG(35)
        MOV_FROM_REG(36)
        MOV_FROM_REG(37)
        MOV_FROM_REG(38)
        MOV_FROM_REG(39)
        MOV_FROM_REG(40)
        MOV_FROM_REG(41)
        MOV_FROM_REG(42)
        MOV_FROM_REG(43)
        MOV_FROM_REG(44)
        MOV_FROM_REG(45)
        MOV_FROM_REG(46)
        MOV_FROM_REG(47)
        MOV_FROM_REG(48)
        MOV_FROM_REG(49)
        MOV_FROM_REG(50)
        MOV_FROM_REG(51)
        MOV_FROM_REG(52)
        MOV_FROM_REG(53)
        MOV_FROM_REG(54)
        MOV_FROM_REG(55)
        MOV_FROM_REG(56)
        MOV_FROM_REG(57)
        MOV_FROM_REG(58)
        MOV_FROM_REG(59)
        MOV_FROM_REG(60)
        MOV_FROM_REG(61)
        MOV_FROM_REG(62)
        MOV_FROM_REG(63)
        MOV_FROM_REG(64)
        MOV_FROM_REG(65)
        MOV_FROM_REG(66)
        MOV_FROM_REG(67)
        MOV_FROM_REG(68)
        MOV_FROM_REG(69)
        MOV_FROM_REG(70)
        MOV_FROM_REG(71)
        MOV_FROM_REG(72)
        MOV_FROM_REG(73)
        MOV_FROM_REG(74)
        MOV_FROM_REG(75)
        MOV_FROM_REG(76)
        MOV_FROM_REG(77)
        MOV_FROM_REG(78)
        MOV_FROM_REG(79)
        MOV_FROM_REG(80)
        MOV_FROM_REG(81)
        MOV_FROM_REG(82)
        MOV_FROM_REG(83)
        MOV_FROM_REG(84)
        MOV_FROM_REG(85)
        MOV_FROM_REG(86)
        MOV_FROM_REG(87)
        MOV_FROM_REG(88)
        MOV_FROM_REG(89)
        MOV_FROM_REG(90)
        MOV_FROM_REG(91)
        MOV_FROM_REG(92)
        MOV_FROM_REG(93)
        MOV_FROM_REG(94)
        MOV_FROM_REG(95)
        MOV_FROM_REG(96)
        MOV_FROM_REG(97)
        MOV_FROM_REG(98)
        MOV_FROM_REG(99)
        MOV_FROM_REG(100)
        MOV_FROM_REG(101)
        MOV_FROM_REG(102)
        MOV_FROM_REG(103)
        MOV_FROM_REG(104)
        MOV_FROM_REG(105)
        MOV_FROM_REG(106)
        MOV_FROM_REG(107)
        MOV_FROM_REG(108)
        MOV_FROM_REG(109)
        MOV_FROM_REG(110)
        MOV_FROM_REG(111)
        MOV_FROM_REG(112)
        MOV_FROM_REG(113)
        MOV_FROM_REG(114)
        MOV_FROM_REG(115)
        MOV_FROM_REG(116)
        MOV_FROM_REG(117)
        MOV_FROM_REG(118)
        MOV_FROM_REG(119)
        MOV_FROM_REG(120)
        MOV_FROM_REG(121)
        MOV_FROM_REG(122)
        MOV_FROM_REG(123)
        MOV_FROM_REG(124)
        MOV_FROM_REG(125)
        MOV_FROM_REG(126)
        MOV_FROM_REG(127)
END(asm_mov_from_reg)

/* must be in bank 0
 * parameter:
 * r31: pr
 * r24: b0
 */
ENTRY(kvm_resume_to_guest_with_sync)
        adds r19=VMM_VPD_BASE_OFFSET,r21
        mov r16 = r31
        mov r17 = r24
        ;;
{.mii
        ld8 r25 =[r19]
        nop 0x0
        mov r24 = ip
        ;;
}
{.mmb
        add r24 =0x20, r24
        nop 0x0
        br.sptk.many kvm_vps_sync_write
}
        mov r31 = r16
        mov r24 = r17
        ;;
        br.sptk.many kvm_resume_to_guest
END(kvm_resume_to_guest_with_sync)
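
/*
 * kvm_resume_to_guest: advance the guest ipsr.ri/iip past the emulated
 * instruction and return to the guest through PAL_VPS_RESUME_NORMAL
 * when vpsr.ic is set, or PAL_VPS_RESUME_HANDLER (with bit 63 of the
 * first VPD word set from isr.ir) when it is clear.
 */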
ENTRY(kvm_resume_to_guest)
        adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
        ;;
        ld8 r1 =[r16]
        adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
        ;;
        mov r16=cr.ipsr
        ;;
        ld8 r20 = [r20]
        adds r19=VMM_VPD_BASE_OFFSET,r21
        ;;
        ld8 r25=[r19]
        extr.u r17=r16,IA64_PSR_RI_BIT,2
        tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
        ;;
(p6)    mov r18=cr.iip
(p6)    mov r17=r0
        ;;
(p6)    add r18=0x10,r18
(p7)    add r17=1,r17
        ;;
(p6)    mov cr.iip=r18
        dep r16=r17,r16,IA64_PSR_RI_BIT,2
        ;;
        mov cr.ipsr=r16
        adds r19=VPD_VPSR_START_OFFSET,r25
        add r28=PAL_VPS_RESUME_NORMAL,r20
        add r29=PAL_VPS_RESUME_HANDLER,r20
        ;;
        ld8 r19=[r19]
        mov b0=r29
        mov r27=cr.isr
        ;;
        tbit.z p6,p7 = r19,IA64_PSR_IC_BIT      // p7=vpsr.ic
        shr r27=r27,IA64_ISR_IR_BIT
        ;;
(p6)    ld8 r26=[r25]
(p7)    mov b0=r28
        ;;
(p6)    dep r26=r27,r26,63,1
        mov pr=r31,-2
        br.sptk.many b0                         // call pal service
        ;;
END(kvm_resume_to_guest)

MOV_TO_BANK0_REG(16)
MOV_TO_BANK0_REG(17)
MOV_TO_BANK0_REG(18)
MOV_TO_BANK0_REG(19)
MOV_TO_BANK0_REG(20)
MOV_TO_BANK0_REG(21)
MOV_TO_BANK0_REG(22)
MOV_TO_BANK0_REG(23)
MOV_TO_BANK0_REG(24)
MOV_TO_BANK0_REG(25)
MOV_TO_BANK0_REG(26)
MOV_TO_BANK0_REG(27)
MOV_TO_BANK0_REG(28)
MOV_TO_BANK0_REG(29)
MOV_TO_BANK0_REG(30)
MOV_TO_BANK0_REG(31)

// mov to reg table
ENTRY(asm_mov_to_reg)
        MOV_TO_REG0
        MOV_TO_REG(1)
        MOV_TO_REG(2)
        MOV_TO_REG(3)
        MOV_TO_REG(4)
        MOV_TO_REG(5)
        MOV_TO_REG(6)
        MOV_TO_REG(7)
        MOV_TO_REG(8)
        MOV_TO_REG(9)
        MOV_TO_REG(10)
        MOV_TO_REG(11)
        MOV_TO_REG(12)
        MOV_TO_REG(13)
        MOV_TO_REG(14)
        MOV_TO_REG(15)
        JMP_TO_MOV_TO_BANK0_REG(16)
        JMP_TO_MOV_TO_BANK0_REG(17)
        JMP_TO_MOV_TO_BANK0_REG(18)
        JMP_TO_MOV_TO_BANK0_REG(19)
        JMP_TO_MOV_TO_BANK0_REG(20)
        JMP_TO_MOV_TO_BANK0_REG(21)
        JMP_TO_MOV_TO_BANK0_REG(22)
        JMP_TO_MOV_TO_BANK0_REG(23)
        JMP_TO_MOV_TO_BANK0_REG(24)
        JMP_TO_MOV_TO_BANK0_REG(25)
        JMP_TO_MOV_TO_BANK0_REG(26)
        JMP_TO_MOV_TO_BANK0_REG(27)
        JMP_TO_MOV_TO_BANK0_REG(28)
        JMP_TO_MOV_TO_BANK0_REG(29)
        JMP_TO_MOV_TO_BANK0_REG(30)
        JMP_TO_MOV_TO_BANK0_REG(31)
        MOV_TO_REG(32)
        MOV_TO_REG(33)
        MOV_TO_REG(34)
        MOV_TO_REG(35)
        MOV_TO_REG(36)
        MOV_TO_REG(37)
        MOV_TO_REG(38)
        MOV_TO_REG(39)
        MOV_TO_REG(40)
        MOV_TO_REG(41)
        MOV_TO_REG(42)
        MOV_TO_REG(43)
        MOV_TO_REG(44)
        MOV_TO_REG(45)
        MOV_TO_REG(46)
        MOV_TO_REG(47)
        MOV_TO_REG(48)
        MOV_TO_REG(49)
        MOV_TO_REG(50)
        MOV_TO_REG(51)
        MOV_TO_REG(52)
        MOV_TO_REG(53)
        MOV_TO_REG(54)
        MOV_TO_REG(55)
        MOV_TO_REG(56)
        MOV_TO_REG(57)
        MOV_TO_REG(58)
        MOV_TO_REG(59)
        MOV_TO_REG(60)
        MOV_TO_REG(61)
        MOV_TO_REG(62)
        MOV_TO_REG(63)
        MOV_TO_REG(64)
        MOV_TO_REG(65)
        MOV_TO_REG(66)
        MOV_TO_REG(67)
        MOV_TO_REG(68)
        MOV_TO_REG(69)
        MOV_TO_REG(70)
        MOV_TO_REG(71)
        MOV_TO_REG(72)
        MOV_TO_REG(73)
        MOV_TO_REG(74)
        MOV_TO_REG(75)
        MOV_TO_REG(76)
        MOV_TO_REG(77)
        MOV_TO_REG(78)
        MOV_TO_REG(79)
        MOV_TO_REG(80)
        MOV_TO_REG(81)
        MOV_TO_REG(82)
        MOV_TO_REG(83)
        MOV_TO_REG(84)
        MOV_TO_REG(85)
        MOV_TO_REG(86)
        MOV_TO_REG(87)
        MOV_TO_REG(88)
        MOV_TO_REG(89)
        MOV_TO_REG(90)
        MOV_TO_REG(91)
        MOV_TO_REG(92)
        MOV_TO_REG(93)
        MOV_TO_REG(94)
        MOV_TO_REG(95)
        MOV_TO_REG(96)
        MOV_TO_REG(97)
        MOV_TO_REG(98)
        MOV_TO_REG(99)
        MOV_TO_REG(100)
        MOV_TO_REG(101)
        MOV_TO_REG(102)
        MOV_TO_REG(103)
        MOV_TO_REG(104)
        MOV_TO_REG(105)
        MOV_TO_REG(106)
        MOV_TO_REG(107)
        MOV_TO_REG(108)
        MOV_TO_REG(109)
        MOV_TO_REG(110)
        MOV_TO_REG(111)
        MOV_TO_REG(112)
        MOV_TO_REG(113)
        MOV_TO_REG(114)
        MOV_TO_REG(115)
        MOV_TO_REG(116)
        MOV_TO_REG(117)
        MOV_TO_REG(118)
        MOV_TO_REG(119)
        MOV_TO_REG(120)
        MOV_TO_REG(121)
        MOV_TO_REG(122)
        MOV_TO_REG(123)
        MOV_TO_REG(124)
        MOV_TO_REG(125)
        MOV_TO_REG(126)
        MOV_TO_REG(127)
END(asm_mov_to_reg)