vmm_ivt.S

/*
 * /ia64/kvm_ivt.S
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *	Asit Mallick <asit.k.mallick@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Kenneth Chen <kenneth.w.chen@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now
 *	    uses virtual PT.
 *
 * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 *	    Supporting Intel virtualization architecture
 *
 */

/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for
 * critical interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *	// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *	entry offset ----/     /         /                  /  /
 *	entry number ---------/         /                  /  /
 *	size of the entry -------------/                  /  /
 *	vector name -------------------------------------/  /
 *	interruptions triggering this vector ----------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */
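/*
 * Layout sanity check (follows from the sizes above and the .org
 * directives below):
 *	20 entries * 64 bundles * 16 bytes/bundle = 0x5000 bytes
 *	48 entries * 16 bundles * 16 bytes/bundle = 0x3000 bytes
 *	total					  = 0x8000 bytes = 32KB
 * so entries 0-19 start at multiples of 0x400 (e.g. entry 7 at 0x1c00)
 * and entries 20-67 start at multiples of 0x100 from 0x5000 upward.
 */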
#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/pgtable.h>
#include "asm-offsets.h"
#include "vcpu.h"
#include "kvm_minstate.h"
#include "vti.h"

#if 1
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif

#define KVM_FAULT(n)	\
	kvm_fault_##n:;	\
	mov r19=n;;	\
	br.sptk.many kvm_fault_##n;	\
	;;	\

#define KVM_REFLECT(n)	\
	mov r31=pr;	\
	mov r19=n;	/* prepare to save predicates */ \
	mov r29=cr.ipsr;	\
	;;	\
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;	\
	(p7) br.sptk.many kvm_dispatch_reflection;	\
	br.sptk.many kvm_panic;	\

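/*
 * KVM_FAULT(n) parks an unhandled vector in a tight self-branch with the
 * vector number in r19.  KVM_REFLECT(n) checks psr.vm: if the fault was
 * raised while the guest was running it is reflected back to the guest
 * via kvm_dispatch_reflection, otherwise the VMM itself faulted and we
 * panic.
 */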
GLOBAL_ENTRY(kvm_panic)
	br.sptk.many kvm_panic
	;;
END(kvm_panic)

	.section .text.ivt,"ax"

	.align 32768		// align on 32KB boundary
	.global kvm_ia64_ivt
kvm_ia64_ivt:
/////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(kvm_vhpt_miss)
	KVM_FAULT(0)
END(kvm_vhpt_miss)

	.org kvm_ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(kvm_itlb_miss)
	mov r31=pr
	mov r29=cr.ipsr;
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
	(p6) br.sptk kvm_alt_itlb_miss
	mov r19=1
	br.sptk kvm_itlb_miss_dispatch
	KVM_FAULT(1);
END(kvm_itlb_miss)

	.org kvm_ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(kvm_dtlb_miss)
	mov r31=pr
	mov r29=cr.ipsr;
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
	(p6) br.sptk kvm_alt_dtlb_miss
	br.sptk kvm_dtlb_miss_dispatch
END(kvm_dtlb_miss)

	.org kvm_ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(kvm_alt_itlb_miss)
	mov r16=cr.ifa		// get address that caused the TLB miss
	;;
	movl r17=PAGE_KERNEL
	mov r24=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	;;
	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
	;;
	or r19=r17,r19		// insert PTE control bits into r19
	;;
	movl r20=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r20
	;;
	itc.i r19		// insert the TLB entry
	mov pr=r31,-1
	rfi
END(kvm_alt_itlb_miss)

	.org kvm_ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(kvm_alt_dtlb_miss)
	mov r16=cr.ifa		// get address that caused the TLB miss
	;;
	movl r17=PAGE_KERNEL
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r24=cr.ipsr
	;;
	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
	;;
	or r19=r19,r17		// insert PTE control bits into r19
	;;
	movl r20=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r20
	;;
	itc.d r19		// insert the TLB entry
	mov pr=r31,-1
	rfi
END(kvm_alt_dtlb_miss)

	.org kvm_ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(kvm_nested_dtlb_miss)
	KVM_FAULT(5)
END(kvm_nested_dtlb_miss)

	.org kvm_ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(kvm_ikey_miss)
	KVM_REFLECT(6)
END(kvm_ikey_miss)

	.org kvm_ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(kvm_dkey_miss)
	KVM_REFLECT(7)
END(kvm_dkey_miss)

	.org kvm_ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(kvm_dirty_bit)
	KVM_REFLECT(8)
END(kvm_dirty_bit)

	.org kvm_ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(kvm_iaccess_bit)
	KVM_REFLECT(9)
END(kvm_iaccess_bit)

	.org kvm_ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(kvm_daccess_bit)
	KVM_REFLECT(10)
END(kvm_daccess_bit)

	.org kvm_ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(kvm_break_fault)
	mov r31=pr
	mov r19=11
	mov r29=cr.ipsr
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	;;
	alloc r14=ar.pfs,0,0,4,0	// now it's safe (must be first in insn group!)
	mov out0=cr.ifa
	mov out2=cr.isr		// FIXME: pity to make this slow access twice
	mov out3=cr.iim		// FIXME: pity to make this slow access twice
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out1=16,sp
	br.call.sptk.many b6=kvm_ia64_handle_break
	;;
END(kvm_break_fault)
	.org kvm_ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(kvm_interrupt)
	mov r31=pr		// prepare to save predicates
	mov r19=12
	mov r29=cr.ipsr
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT
	tbit.z p0,p15=r29,IA64_PSR_I_BIT
	;;
	(p7) br.sptk kvm_dispatch_interrupt
	;;
	mov r27=ar.rsc		/* M */
	mov r20=r1		/* A */
	mov r25=ar.unat		/* M */
	mov r26=ar.pfs		/* I */
	mov r28=cr.iip		/* M */
	cover			/* B (or nothing) */
	;;
	mov r1=sp
	;;
	invala			/* M */
	mov r30=cr.ifs
	;;
	addl r1=-VMM_PT_REGS_SIZE,r1
	;;
	adds r17=2*L1_CACHE_BYTES,r1	/* really: biggest cache-line size */
	adds r16=PT(CR_IPSR),r1
	;;
	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
	st8 [r16]=r29		/* save cr.ipsr */
	;;
	lfetch.fault.excl.nt1 [r17]
	mov r29=b0
	;;
	adds r16=PT(R8),r1	/* initialize first base pointer */
	adds r17=PT(R9),r1	/* initialize second base pointer */
	mov r18=r0		/* make sure r18 isn't NaT */
	;;
.mem.offset 0,0; st8.spill [r16]=r8,16
.mem.offset 8,0; st8.spill [r17]=r9,16
	;;
.mem.offset 0,0; st8.spill [r16]=r10,24
.mem.offset 8,0; st8.spill [r17]=r11,24
	;;
	st8 [r16]=r28,16	/* save cr.iip */
	st8 [r17]=r30,16	/* save cr.ifs */
	mov r8=ar.fpsr		/* M */
	mov r9=ar.csd
	mov r10=ar.ssd
	movl r11=FPSR_DEFAULT	/* L-unit */
	;;
	st8 [r16]=r25,16	/* save ar.unat */
	st8 [r17]=r26,16	/* save ar.pfs */
	shl r18=r18,16		/* compute ar.rsc to be used for "loadrs" */
	;;
	st8 [r16]=r27,16	/* save ar.rsc */
	adds r17=16,r17		/* skip over ar_rnat field */
	;;
	st8 [r17]=r31,16	/* save predicates */
	adds r16=16,r16		/* skip over ar_bspstore field */
	;;
	st8 [r16]=r29,16	/* save b0 */
	st8 [r17]=r18,16	/* save ar.rsc value for "loadrs" */
	;;
.mem.offset 0,0; st8.spill [r16]=r20,16	/* save original r1 */
.mem.offset 8,0; st8.spill [r17]=r12,16
	adds r12=-16,r1
	/* switch to kernel memory stack (with 16 bytes of scratch) */
	;;
.mem.offset 0,0; st8.spill [r16]=r13,16
.mem.offset 8,0; st8.spill [r17]=r8,16	/* save ar.fpsr */
	;;
.mem.offset 0,0; st8.spill [r16]=r15,16
.mem.offset 8,0; st8.spill [r17]=r14,16
	dep r14=-1,r0,60,4
	;;
.mem.offset 0,0; st8.spill [r16]=r2,16
.mem.offset 8,0; st8.spill [r17]=r3,16
	adds r2=VMM_PT_REGS_R16_OFFSET,r1
	adds r14=VMM_VCPU_GP_OFFSET,r13
	;;
	mov r8=ar.ccv
	ld8 r14=[r14]
	;;
	mov r1=r14		/* establish kernel global pointer */
	;;
	bsw.1
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
	mov out0=r13
	;;
	ssm psr.ic
	;;
	srlz.i
	;;
	//(p15) ssm psr.i
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	srlz.i			// ensure everybody knows psr.ic is back on
	;;
.mem.offset 0,0; st8.spill [r2]=r16,16
.mem.offset 8,0; st8.spill [r3]=r17,16
	;;
.mem.offset 0,0; st8.spill [r2]=r18,16
.mem.offset 8,0; st8.spill [r3]=r19,16
	;;
.mem.offset 0,0; st8.spill [r2]=r20,16
.mem.offset 8,0; st8.spill [r3]=r21,16
	mov r18=b6
	;;
.mem.offset 0,0; st8.spill [r2]=r22,16
.mem.offset 8,0; st8.spill [r3]=r23,16
	mov r19=b7
	;;
.mem.offset 0,0; st8.spill [r2]=r24,16
.mem.offset 8,0; st8.spill [r3]=r25,16
	;;
.mem.offset 0,0; st8.spill [r2]=r26,16
.mem.offset 8,0; st8.spill [r3]=r27,16
	;;
.mem.offset 0,0; st8.spill [r2]=r28,16
.mem.offset 8,0; st8.spill [r3]=r29,16
	;;
.mem.offset 0,0; st8.spill [r2]=r30,16
.mem.offset 8,0; st8.spill [r3]=r31,32
	;;
	mov ar.fpsr=r11		/* M-unit */
	st8 [r2]=r8,8		/* ar.ccv */
	adds r24=PT(B6)-PT(F7),r3
	;;
	stf.spill [r2]=f6,32
	stf.spill [r3]=f7,32
	;;
	stf.spill [r2]=f8,32
	stf.spill [r3]=f9,32
	;;
	stf.spill [r2]=f10
	stf.spill [r3]=f11
	adds r25=PT(B7)-PT(F11),r3
	;;
	st8 [r24]=r18,16	/* b6 */
	st8 [r25]=r19,16	/* b7 */
	;;
	st8 [r24]=r9		/* ar.csd */
	st8 [r25]=r10		/* ar.ssd */
	;;
	srlz.d			// make sure we see the effect of cr.ivr
	addl r14=@gprel(ia64_leave_nested),gp
	;;
	mov rp=r14
	br.call.sptk.many b6=kvm_ia64_handle_irq
	;;
END(kvm_interrupt)
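/*
 * Note that kvm_interrupt saves the minimal machine state inline (rather
 * than via KVM_SAVE_MIN_WITH_COVER_R19 as the other handlers do) and
 * returns through ia64_leave_nested instead of ia64_leave_hypervisor.
 */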
	.global kvm_dispatch_vexirq
	.org kvm_ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
ENTRY(kvm_virtual_exirq)
	mov r31=pr
	mov r19=13
	mov r30=r0
	;;
kvm_dispatch_vexirq:
	cmp.eq p6,p0=1,r30
	;;
	(p6) add r29=VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	(p6) ld8 r1=[r29]
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,1,0
	mov out0=r13
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	adds r3=8,r2		// set up second base pointer
	;;
	KVM_SAVE_REST
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	mov rp=r14
	br.call.sptk.many b6=kvm_vexirq
END(kvm_virtual_exirq)

	.org kvm_ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	KVM_FAULT(14)
	// this code segment is from 2.6.16.13

	.org kvm_ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	KVM_FAULT(15)

	.org kvm_ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	KVM_FAULT(16)

	.org kvm_ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	KVM_FAULT(17)

	.org kvm_ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	KVM_FAULT(18)

	.org kvm_ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	KVM_FAULT(19)

	.org kvm_ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present
ENTRY(kvm_page_not_present)
	KVM_REFLECT(20)
END(kvm_page_not_present)

	.org kvm_ia64_ivt+0x5100
/////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
ENTRY(kvm_key_permission)
	KVM_REFLECT(21)
END(kvm_key_permission)

	.org kvm_ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(kvm_iaccess_rights)
	KVM_REFLECT(22)
END(kvm_iaccess_rights)

	.org kvm_ia64_ivt+0x5300
/////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(kvm_daccess_rights)
	KVM_REFLECT(23)
END(kvm_daccess_rights)

	.org kvm_ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(kvm_general_exception)
	KVM_REFLECT(24)
	KVM_FAULT(24)
END(kvm_general_exception)

	.org kvm_ia64_ivt+0x5500
/////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(kvm_disabled_fp_reg)
	KVM_REFLECT(25)
END(kvm_disabled_fp_reg)

	.org kvm_ia64_ivt+0x5600
/////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(kvm_nat_consumption)
	KVM_REFLECT(26)
END(kvm_nat_consumption)

	.org kvm_ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(kvm_speculation_vector)
	KVM_REFLECT(27)
END(kvm_speculation_vector)

	.org kvm_ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	KVM_FAULT(28)

	.org kvm_ia64_ivt+0x5900
/////////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(kvm_debug_vector)
	KVM_FAULT(29)
END(kvm_debug_vector)

	.org kvm_ia64_ivt+0x5a00
/////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(kvm_unaligned_access)
	KVM_REFLECT(30)
END(kvm_unaligned_access)

	.org kvm_ia64_ivt+0x5b00
/////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(kvm_unsupported_data_reference)
	KVM_REFLECT(31)
END(kvm_unsupported_data_reference)

	.org kvm_ia64_ivt+0x5c00
/////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
ENTRY(kvm_floating_point_fault)
	KVM_REFLECT(32)
END(kvm_floating_point_fault)

	.org kvm_ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(kvm_floating_point_trap)
	KVM_REFLECT(33)
END(kvm_floating_point_trap)

	.org kvm_ia64_ivt+0x5e00
/////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(kvm_lower_privilege_trap)
	KVM_REFLECT(34)
END(kvm_lower_privilege_trap)

	.org kvm_ia64_ivt+0x5f00
/////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(kvm_taken_branch_trap)
	KVM_REFLECT(35)
END(kvm_taken_branch_trap)

	.org kvm_ia64_ivt+0x6000
/////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(kvm_single_step_trap)
	KVM_REFLECT(36)
END(kvm_single_step_trap)

	.global kvm_virtualization_fault_back
	.org kvm_ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
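// On a virtualization fault r24 already holds the cause (EVENT_*) and r25
// the faulting opcode.  The handler below first tries the inlined fast
// paths (kvm_asm_*); anything else falls back through
// kvm_virtualization_fault_back to kvm_dispatch_virtualization_fault.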
ENTRY(kvm_virtualization_fault)
	mov r31=pr
	adds r16=VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	st8 [r16]=r1
	adds r17=VMM_VCPU_GP_OFFSET,r21
	;;
	ld8 r1=[r17]
	cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
	cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
	cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
	cmp.eq p9,p0=EVENT_RSM,r24
	cmp.eq p10,p0=EVENT_SSM,r24
	cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
	cmp.eq p12,p0=EVENT_THASH,r24
	(p6) br.dptk.many kvm_asm_mov_from_ar
	(p7) br.dptk.many kvm_asm_mov_from_rr
	(p8) br.dptk.many kvm_asm_mov_to_rr
	(p9) br.dptk.many kvm_asm_rsm
	(p10) br.dptk.many kvm_asm_ssm
	(p11) br.dptk.many kvm_asm_mov_to_psr
	(p12) br.dptk.many kvm_asm_thash
	;;
kvm_virtualization_fault_back:
	adds r16=VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	ld8 r1=[r16]
	;;
	mov r19=37
	adds r16=VMM_VCPU_CAUSE_OFFSET,r21
	adds r17=VMM_VCPU_OPCODE_OFFSET,r21
	;;
	st8 [r16]=r24
	st8 [r17]=r25
	;;
	cmp.ne p6,p0=EVENT_RFI,r24
	(p6) br.sptk kvm_dispatch_virtualization_fault
	;;
	adds r18=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r18=[r18]
	;;
	adds r18=VMM_VPD_VIFS_OFFSET,r18
	;;
	ld8 r18=[r18]
	;;
	tbit.z p6,p0=r18,63
	(p6) br.sptk kvm_dispatch_virtualization_fault
	;;
	// if vifs.v == 1, discard the current register frame
	alloc r18=ar.pfs,0,0,0,0
	br.sptk kvm_dispatch_virtualization_fault
END(kvm_virtualization_fault)

	.org kvm_ia64_ivt+0x6200
/////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	KVM_FAULT(38)

	.org kvm_ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	KVM_FAULT(39)

	.org kvm_ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	KVM_FAULT(40)

	.org kvm_ia64_ivt+0x6500
/////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	KVM_FAULT(41)

	.org kvm_ia64_ivt+0x6600
/////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	KVM_FAULT(42)

	.org kvm_ia64_ivt+0x6700
/////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	KVM_FAULT(43)

	.org kvm_ia64_ivt+0x6800
/////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	KVM_FAULT(44)

	.org kvm_ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
// (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(kvm_ia32_exception)
	KVM_FAULT(45)
END(kvm_ia32_exception)

	.org kvm_ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(kvm_ia32_intercept)
	KVM_FAULT(47)
END(kvm_ia32_intercept)

	.org kvm_ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	KVM_FAULT(48)

	.org kvm_ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	KVM_FAULT(49)

	.org kvm_ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	KVM_FAULT(50)

	.org kvm_ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	KVM_FAULT(52)

	.org kvm_ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	KVM_FAULT(53)

	.org kvm_ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	KVM_FAULT(54)

	.org kvm_ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	KVM_FAULT(55)

	.org kvm_ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	KVM_FAULT(56)

	.org kvm_ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	KVM_FAULT(57)

	.org kvm_ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	KVM_FAULT(58)

	.org kvm_ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	KVM_FAULT(59)

	.org kvm_ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	KVM_FAULT(60)

	.org kvm_ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	KVM_FAULT(61)

	.org kvm_ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	KVM_FAULT(62)

	.org kvm_ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	KVM_FAULT(63)

	.org kvm_ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	KVM_FAULT(64)

	.org kvm_ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	KVM_FAULT(65)

	.org kvm_ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	KVM_FAULT(66)

	.org kvm_ia64_ivt+0x7f00
/////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	KVM_FAULT(67)

	.org kvm_ia64_ivt+0x8000
// There is no particular reason for this code to be here, other than that
// there happens to be space here that would go unused otherwise.  If this
// fault ever gets "unreserved", simply move the following code to a more
// suitable spot...
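//
// The dispatch stubs below all follow the same pattern: save the minimal
// state with KVM_SAVE_MIN_WITH_COVER_R19, re-enable interruption
// collection (ssm psr.ic; srlz.i), save the remaining registers, point rp
// at ia64_leave_hypervisor (or its _prepare variant), and branch to the
// C-level handler with its arguments in out0..outN.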
ENTRY(kvm_dtlb_miss_dispatch)
	mov r19=2
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=r15
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
	;;
	KVM_SAVE_REST
	KVM_SAVE_EXTRA
	mov rp=r14
	;;
	adds out2=16,r12
	br.call.sptk.many b6=kvm_page_fault
END(kvm_dtlb_miss_dispatch)

ENTRY(kvm_itlb_miss_dispatch)
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=r15
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12
	br.call.sptk.many b6=kvm_page_fault
END(kvm_itlb_miss_dispatch)

ENTRY(kvm_dispatch_reflection)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	intr type (offset into ivt, see ia64_int.h)
	 *	r31:	contains saved predicates (pr)
	 */
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out0=cr.ifa
	mov out1=cr.isr
	mov out2=cr.iim
	mov out3=r15
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out4=16,r12
	br.call.sptk.many b6=reflect_interruption
END(kvm_dispatch_reflection)

ENTRY(kvm_dispatch_virtualization_fault)
	adds r16=VMM_VCPU_CAUSE_OFFSET,r21
	adds r17=VMM_VCPU_OPCODE_OFFSET,r21
	;;
	st8 [r16]=r24
	st8 [r17]=r25
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	;;
	alloc r14=ar.pfs,0,0,2,0	// now it's safe (must be first in insn group!)
	mov out0=r13		// vcpu
	adds r3=8,r2		// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
	;;
	KVM_SAVE_REST
	KVM_SAVE_EXTRA
	mov rp=r14
	;;
	adds out1=16,sp		// regs
	br.call.sptk.many b6=kvm_emulate
END(kvm_dispatch_virtualization_fault)

ENTRY(kvm_dispatch_interrupt)
	KVM_SAVE_MIN_WITH_COVER_R19	// uses r31; defines r2 and r3
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
	//mov out0=cr.ivr	// pass cr.ivr as first arg
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	;;
	ssm psr.ic
	;;
	srlz.i
	;;
	//(p15) ssm psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	mov out0=r13		// pass pointer to pt_regs as second arg
	br.call.sptk.many b6=kvm_ia64_handle_irq
END(kvm_dispatch_interrupt)

GLOBAL_ENTRY(ia64_leave_nested)
	rsm psr.i
	;;
	adds r21=PT(PR)+16,r12
	;;
	lfetch [r21],PT(CR_IPSR)-PT(PR)
	adds r2=PT(B6)+16,r12
	adds r3=PT(R16)+16,r12
	;;
	lfetch [r21]
	ld8 r28=[r2],8		// load b6
	adds r29=PT(R24)+16,r12
	ld8.fill r16=[r3]
	adds r3=PT(AR_CSD)-PT(R16),r3
	adds r30=PT(AR_CCV)+16,r12
	;;
	ld8.fill r24=[r29]
	ld8 r15=[r30]		// load ar.ccv
	;;
	ld8 r29=[r2],16		// load b7
	ld8 r30=[r3],16		// load ar.csd
	;;
	ld8 r31=[r2],16		// load ar.ssd
	ld8.fill r8=[r3],16
	;;
	ld8.fill r9=[r2],16
	ld8.fill r10=[r3],PT(R17)-PT(R10)
	;;
	ld8.fill r11=[r2],PT(R18)-PT(R11)
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	mov ar.csd=r30
	mov ar.ssd=r31
	;;
	rsm psr.i | psr.ic
	// initiate turning off of interrupt and interruption collection
	invala			// invalidate ALAT
	;;
	srlz.i
	;;
	ld8.fill r22=[r2],24
	ld8.fill r23=[r3],24
	mov b6=r28
	;;
	ld8.fill r25=[r2],16
	ld8.fill r26=[r3],16
	mov b7=r29
	;;
	ld8.fill r27=[r2],16
	ld8.fill r28=[r3],16
	;;
	ld8.fill r29=[r2],16
	ld8.fill r30=[r3],24
	;;
	ld8.fill r31=[r2],PT(F9)-PT(R31)
	adds r3=PT(F10)-PT(F6),r3
	;;
	ldf.fill f9=[r2],PT(F6)-PT(F9)
	ldf.fill f10=[r3],PT(F8)-PT(F10)
	;;
	ldf.fill f6=[r2],PT(F7)-PT(F6)
	;;
	ldf.fill f7=[r2],PT(F11)-PT(F7)
	ldf.fill f8=[r3],32
	;;
	srlz.i			// ensure interruption collection is off
	mov ar.ccv=r15
	;;
	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
	;;
	ldf.fill f11=[r2]
//	mov r18=r13
//	mov r21=r13
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12
	;;
	ld8 r29=[r16],16	// load cr.ipsr
	ld8 r28=[r17],16	// load cr.iip
	;;
	ld8 r30=[r16],16	// load cr.ifs
	ld8 r25=[r17],16	// load ar.unat
	;;
	ld8 r26=[r16],16	// load ar.pfs
	ld8 r27=[r17],16	// load ar.rsc
	cmp.eq p9,p0=r0,r0
	// set p9 to indicate that we should restore cr.ifs
	;;
	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
	;;
	ld8 r31=[r16],16	// load predicates
	ld8 r22=[r17],16	// load b0
	;;
	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16	// load r1
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
	;;
	ld8 r20=[r16],16	// ar.fpsr
	ld8.fill r15=[r17],16
	;;
	ld8.fill r14=[r16],16
	ld8.fill r2=[r17]
	;;
	ld8.fill r3=[r16]
	;;
	mov r16=ar.bsp		// get existing backing store pointer
	;;
	mov b0=r22
	mov ar.pfs=r26
	mov cr.ifs=r30
	mov cr.ipsr=r29
	mov ar.fpsr=r20
	mov cr.iip=r28
	;;
	mov ar.rsc=r27
	mov ar.unat=r25
	mov pr=r31,-1
	rfi
END(ia64_leave_nested)
GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it
	 * returns to user- or fsys-mode, hence we disable interrupts early on:
	 */
	adds r2=PT(R4)+16,r12
	adds r3=PT(R5)+16,r12
	adds r8=PT(EML_UNAT)+16,r12
	;;
	ld8 r8=[r8]
	;;
	mov ar.unat=r8
	;;
	ld8.fill r4=[r2],16	// load r4
	ld8.fill r5=[r3],16	// load r5
	;;
	ld8.fill r6=[r2]	// load r6
	ld8.fill r7=[r3]	// load r7
	;;
END(ia64_leave_hypervisor_prepare)
// fall through
GLOBAL_ENTRY(ia64_leave_hypervisor)
	rsm psr.i
	;;
	br.call.sptk.many b0=leave_hypervisor_tail
	;;
	adds r20=PT(PR)+16,r12
	adds r8=PT(EML_UNAT)+16,r12
	;;
	ld8 r8=[r8]
	;;
	mov ar.unat=r8
	;;
	lfetch [r20],PT(CR_IPSR)-PT(PR)
	adds r2=PT(B6)+16,r12
	adds r3=PT(B7)+16,r12
	;;
	lfetch [r20]
	;;
	ld8 r24=[r2],16		/* B6 */
	ld8 r25=[r3],16		/* B7 */
	;;
	ld8 r26=[r2],16		/* ar_csd */
	ld8 r27=[r3],16		/* ar_ssd */
	mov b6=r24
	;;
	ld8.fill r8=[r2],16
	ld8.fill r9=[r3],16
	mov b7=r25
	;;
	mov ar.csd=r26
	mov ar.ssd=r27
	;;
	ld8.fill r10=[r2],PT(R15)-PT(R10)
	ld8.fill r11=[r3],PT(R14)-PT(R11)
	;;
	ld8.fill r15=[r2],PT(R16)-PT(R15)
	ld8.fill r14=[r3],PT(R17)-PT(R14)
	;;
	ld8.fill r16=[r2],16
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	;;
	ld8.fill r22=[r2],16
	ld8.fill r23=[r3],16
	;;
	ld8.fill r24=[r2],16
	ld8.fill r25=[r3],16
	;;
	ld8.fill r26=[r2],16
	ld8.fill r27=[r3],16
	;;
	ld8.fill r28=[r2],16
	ld8.fill r29=[r3],16
	;;
	ld8.fill r30=[r2],PT(F6)-PT(R30)
	ld8.fill r31=[r3],PT(F7)-PT(R31)
	;;
	rsm psr.i | psr.ic
	// initiate turning off of interrupt and interruption collection
	invala			// invalidate ALAT
	;;
	srlz.i			// ensure interruption collection is off
	;;
	bsw.0
	;;
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12
	mov r21=r13		// get current
	;;
	ld8 r31=[r16],16	// load cr.ipsr
	ld8 r30=[r17],16	// load cr.iip
	;;
	ld8 r29=[r16],16	// load cr.ifs
	ld8 r28=[r17],16	// load ar.unat
	;;
	ld8 r27=[r16],16	// load ar.pfs
	ld8 r26=[r17],16	// load ar.rsc
	;;
	ld8 r25=[r16],16	// load ar.rnat
	ld8 r24=[r17],16	// load ar.bspstore
	;;
	ld8 r23=[r16],16	// load predicates
	ld8 r22=[r17],16	// load b0
	;;
	ld8 r20=[r16],16	// load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16	// load r1
	;;
	ld8.fill r12=[r16],16	// load r12
	ld8.fill r13=[r17],PT(R2)-PT(R13)	// load r13
	;;
	ld8 r19=[r16],PT(R3)-PT(AR_FPSR)	// load ar_fpsr
	ld8.fill r2=[r17],PT(AR_CCV)-PT(R2)	// load r2
	;;
	ld8.fill r3=[r16]	// load r3
	ld8 r18=[r17]		// load ar_ccv
	;;
	mov ar.fpsr=r19
	mov ar.ccv=r18
	shr.u r18=r20,16
	;;
kvm_rbs_switch:
	mov r19=96
kvm_dont_preserve_current_frame:
	/*
	 * To prevent leaking bits between the hypervisor and guest domain,
	 * we must clear the stacked registers in the "invalid" partition
	 * here. Not pretty, but at least it's fast (3.34 registers/cycle on
	 * Itanium, 5 registers/cycle on McKinley).
	 */
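	/*
	 * The clearing loop below works by recursion: each call to
	 * kvm_rse_clear_invalid allocates Nregs stacked registers, zeroes
	 * them, and recurses while more than Nregs*8 bytes remain; the
	 * matching br.ret chain then unwinds once the recursion count in
	 * in1 indicates we are back at the top level.
	 */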
#	define pRecurse	p6
#	define pReturn	p7
#	define Nregs	14

	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9	// RNaTslots <= floor(dirtySize / (64*8))
	sub r19=r19,r18		// r19 = (physStackedSize + 8) - dirtySize
	;;
	mov ar.rsc=r20		// load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r19
	mov in1=0
	;;
	TEXT_ALIGN(32)
kvm_rse_clear_invalid:
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0
	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
	add out1=1,in1		// increment recursion count
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc5=0
	mov loc6=0
	mov loc7=0
(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
	;;
	mov loc8=0
	mov loc9=0
	cmp.ne pReturn,p0=r0,in1
	// if recursion count != 0, we need to do a br.ret
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.dptk.many b0

#	undef pRecurse
#	undef pReturn

// loadrs has already been shifted
	alloc r16=ar.pfs,0,0,0,0	// drop current register frame
	;;
	loadrs
	;;
	mov ar.bspstore=r24
	;;
	mov ar.unat=r28
	mov ar.rnat=r25
	mov ar.rsc=r26
	;;
	mov cr.ipsr=r31
	mov cr.iip=r30
	mov cr.ifs=r29
	mov ar.pfs=r27
	adds r18=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r18=[r18]		// vpd
	adds r17=VMM_VCPU_ISR_OFFSET,r21
	;;
	ld8 r17=[r17]
	adds r19=VMM_VPD_VPSR_OFFSET,r18
	;;
	ld8 r19=[r19]		// vpsr
	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21
	;;
	ld8 r20=[r20]
	;;
	//vsa_sync_write_start
	mov r25=r18
	adds r16=VMM_VCPU_GP_OFFSET,r21
	;;
	ld8 r16=[r16]		// load gp (folded into the return address in r24 below)
	movl r24=@gprel(ia64_vmm_entry)	// calculate return address
	;;
	add r24=r24,r16
	;;
	add r16=PAL_VPS_SYNC_WRITE,r20
	;;
	mov b0=r16
	br.cond.sptk b0		// call the service
	;;
END(ia64_leave_hypervisor)
// fall through
GLOBAL_ENTRY(ia64_vmm_entry)
	/*
	 * must be at bank 0
	 * parameter:
	 *	r17:	cr.isr
	 *	r18:	vpd
	 *	r19:	vpsr
	 *	r20:	__vsa_base
	 *	r22:	b0
	 *	r23:	predicate
	 */
	mov r24=r22
	mov r25=r18
	tbit.nz p1,p2=r19,IA64_PSR_IC_BIT	// p1=vpsr.ic
	;;
	(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
	(p1) br.sptk.many ia64_vmm_entry_out
	;;
	tbit.nz p1,p2=r17,IA64_ISR_IR_BIT	// p1=cr.isr.ir
	;;
	(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
	(p2) add r29=PAL_VPS_RESUME_HANDLER,r20
	(p2) ld8 r26=[r25]
	;;
ia64_vmm_entry_out:
	mov pr=r23,-2
	mov b0=r29
	;;
	br.cond.sptk b0		// call pal service
END(ia64_vmm_entry)

/*
 * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
 *			u64 arg3, u64 arg4, u64 arg5,
 *			u64 arg6, u64 arg7);
 *
 * XXX: The currently defined services use only 4 args at the max. The
 * rest are not consumed.
 */
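/*
 * proc is an offset from the per-vcpu __vsa_base: the code below loads the
 * base from VMM_VCPU_VSA_BASE_OFFSET and adds in0 to form the service
 * entry point.  The C-level return value in r8 is taken from r31 after the
 * service returns.
 */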
GLOBAL_ENTRY(ia64_call_vsa)
	.regstk 4,4,0,0

	rpsave	= loc0
	pfssave	= loc1
	psrsave	= loc2
	entry	= loc3
	hostret	= r24

	alloc pfssave=ar.pfs,4,4,0,0
	mov rpsave=rp
	adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
	;;
	ld8 entry=[entry]
1:	mov hostret=ip
	mov r25=in1		// copy arguments
	mov r26=in2
	mov r27=in3
	mov psrsave=psr
	;;
	tbit.nz p6,p0=psrsave,14	// IA64_PSR_I
	tbit.nz p7,p0=psrsave,13	// IA64_PSR_IC
	;;
	add hostret=2f-1b,hostret	// calculate return address
	add entry=entry,in0
	;;
	rsm psr.i | psr.ic
	;;
	srlz.i
	mov b6=entry
	br.cond.sptk b6		// call the service
2:
	// Architectural sequence for enabling interrupts if necessary
	(p7) ssm psr.ic
	;;
	(p7) srlz.i
	;;
	//(p6) ssm psr.i
	;;
	mov rp=rpsave
	mov ar.pfs=pfssave
	mov r8=r31
	;;
	srlz.d
	br.ret.sptk rp
END(ia64_call_vsa)

#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100)

GLOBAL_ENTRY(vmm_reset_entry)
	// set up ipsr, iip, vpd.vpsr, dcr
	// For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
	// For DCR: all bits 0
	adds r14=-VMM_PT_REGS_SIZE, r12
	;;
	movl r6=0x501008826000		// IPSR dt/rt/it:1; i/ic:1, si:1, vm/bn:1
	movl r10=0x8000000000000000
	adds r16=PT(CR_IIP), r14
	adds r20=PT(R1), r14
	;;
	rsm psr.ic | psr.i
	;;
	srlz.i
	;;
	bsw.0
	;;
	mov r21=r13
	;;
	bsw.1
	;;
	mov ar.rsc=0
	;;
	flushrs
	;;
	mov ar.bspstore=0
	// clear BSPSTORE
	;;
	mov cr.ipsr=r6
	mov cr.ifs=r10
	ld8 r4=[r16]		// Set init iip for first run.
	ld8 r1=[r20]
	;;
	mov cr.iip=r4
	;;
	adds r16=VMM_VPD_BASE_OFFSET,r13
	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13
	;;
	ld8 r18=[r16]
	ld8 r20=[r20]
	;;
	adds r19=VMM_VPD_VPSR_OFFSET,r18
	;;
	ld8 r19=[r19]
	mov r17=r0
	mov r22=r0
	mov r23=r0
	br.cond.sptk ia64_vmm_entry
	br.ret.sptk b0
END(vmm_reset_entry)