/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
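
/*
 * These macros index into the CPU context structure via constants
 * generated by asm-offsets. As a hedged C-level sketch (the struct and
 * field names here are illustrative, not the exact kernel definitions),
 * the layout they assume is roughly:
 *
 *	struct cpu_context_sketch {
 *		struct user_pt_regs regs;	// x0-x30, sp, pc, pstate
 *		u64 sp_el1;
 *		u64 elr_el1;
 *		u64 spsr[NR_SPSR];		// indexed by KVM_SPSR_*
 *		struct user_fpsimd_state fp_regs;
 *		u64 sys_regs[NR_SYS_REGS];	// indexed by MPIDR_EL1, ...
 *	};
 *
 * so CPU_XREG_OFFSET(n) would be the byte offset of regs.regs[n], and
 * CPU_SYSREG_OFFSET(r) the byte offset of sys_regs[r].
 */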
	.text
	.pushsection	.hyp.text, "ax"
	.align	PAGE_SHIFT

__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start

.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]

	msr	sp_el0, x19
	msr	elr_el2, x20		// EL1 PC
	msr	spsr_el2, x21		// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm

.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
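
/*
 * Why the ordering matters, sketched in C: save_sysregs reads the EL1
 * system registers into x4-x23 and then stores them as consecutive
 * pairs starting at &ctx->sys_regs[MPIDR_EL1]. It therefore behaves
 * like the loop below, where each register's array index is its
 * #define from kvm_asm.h (MPIDR_EL1, CSSELR_EL1, ... in exactly the
 * mrs order used here; 'read_sysreg_by_index' is a hypothetical
 * helper, not kernel API):
 *
 *	for (int i = MPIDR_EL1; i <= CNTKCTL_EL1; i++)
 *		ctx->sys_regs[i] = read_sysreg_by_index(i);
 *
 * Reorder one mrs without reordering kvm_asm.h and every later
 * register lands in the wrong sys_regs[] slot.
 */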
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x4, vmpidr_el2
	mrs	x5, csselr_el1
	mrs	x6, sctlr_el1
	mrs	x7, actlr_el1
	mrs	x8, cpacr_el1
	mrs	x9, ttbr0_el1
	mrs	x10, ttbr1_el1
	mrs	x11, tcr_el1
	mrs	x12, esr_el1
	mrs	x13, afsr0_el1
	mrs	x14, afsr1_el1
	mrs	x15, far_el1
	mrs	x16, mair_el1
	mrs	x17, vbar_el1
	mrs	x18, contextidr_el1
	mrs	x19, tpidr_el0
	mrs	x20, tpidrro_el0
	mrs	x21, tpidr_el1
	mrs	x22, amair_el1
	mrs	x23, cntkctl_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
.endm

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]

	msr	vmpidr_el2, x4
	msr	csselr_el1, x5
	msr	sctlr_el1, x6
	msr	actlr_el1, x7
	msr	cpacr_el1, x8
	msr	ttbr0_el1, x9
	msr	ttbr1_el1, x10
	msr	tcr_el1, x11
	msr	esr_el1, x12
	msr	afsr0_el1, x13
	msr	afsr1_el1, x14
	msr	far_el1, x15
	msr	mair_el1, x16
	msr	vbar_el1, x17
	msr	contextidr_el1, x18
	msr	tpidr_el0, x19
	msr	tpidrro_el0, x20
	msr	tpidr_el1, x21
	msr	amair_el1, x22
	msr	cntkctl_el1, x23
.endm

.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.endm

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
.endm

.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	mrs	x4, spsr_abt
	mrs	x5, spsr_und
	mrs	x6, spsr_irq
	mrs	x7, spsr_fiq
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	mrs	x4, dacr32_el2
	mrs	x5, ifsr32_el2
	mrs	x6, fpexc32_el2
	mrs	x7, dbgvcr32_el2
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	mrs	x4, teecr32_el1
	mrs	x5, teehbr32_el1
	stp	x4, x5, [x3]
1:
.endm

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	spsr_abt, x4
	msr	spsr_und, x5
	msr	spsr_irq, x6
	msr	spsr_fiq, x7

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	dacr32_el2, x4
	msr	ifsr32_el2, x5
	msr	fpexc32_el2, x6
	msr	dbgvcr32_el2, x7

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	ldp	x4, x5, [x3]
	msr	teecr32_el1, x4
	msr	teehbr32_el1, x5
1:
.endm

.macro activate_traps
	ldr	x2, [x0, #VCPU_IRQ_LINES]
	ldr	x1, [x0, #VCPU_HCR_EL2]
	orr	x2, x2, x1
	msr	hcr_el2, x2

	ldr	x2, =(CPTR_EL2_TTA)
	msr	cptr_el2, x2

	ldr	x2, =(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	msr	mdcr_el2, x2
.endm
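
/*
 * Rough C equivalent of the trap setup above (a sketch only; the
 * write_*/read_* helpers are illustrative, not kernel API):
 *
 *	write_hcr_el2(vcpu->irq_lines | vcpu->hcr_el2); // pending IRQ/FIQ lines
 *	write_cptr_el2(CPTR_EL2_TTA);			// trap trace registers
 *	write_hstr_el2(1UL << 15);			// trap CP15 c15 accesses
 *	write_mdcr_el2((read_mdcr_el2() & MDCR_EL2_HPMN_MASK)
 *		       | MDCR_EL2_TPM | MDCR_EL2_TPMCR); // trap PMU accesses
 */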
.macro deactivate_traps
	mov	x2, #HCR_RW
	msr	hcr_el2, x2
	msr	cptr_el2, xzr
	msr	hstr_el2, xzr

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm

.macro deactivate_vm
	msr	vttbr_el2, xzr
.endm

/*
 * Save the VGIC CPU state into memory
 * x0: Register pointing to VCPU struct
 * Do not corrupt x1!!!
 */
.macro save_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va	x2
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* Save all interesting registers */
	ldr	w4, [x2, #GICH_HCR]
	ldr	w5, [x2, #GICH_VMCR]
	ldr	w6, [x2, #GICH_MISR]
	ldr	w7, [x2, #GICH_EISR0]
	ldr	w8, [x2, #GICH_EISR1]
	ldr	w9, [x2, #GICH_ELRSR0]
	ldr	w10, [x2, #GICH_ELRSR1]
	ldr	w11, [x2, #GICH_APR]

	str	w4, [x3, #VGIC_CPU_HCR]
	str	w5, [x3, #VGIC_CPU_VMCR]
	str	w6, [x3, #VGIC_CPU_MISR]
	str	w7, [x3, #VGIC_CPU_EISR]
	str	w8, [x3, #(VGIC_CPU_EISR + 4)]
	str	w9, [x3, #VGIC_CPU_ELRSR]
	str	w10, [x3, #(VGIC_CPU_ELRSR + 4)]
	str	w11, [x3, #VGIC_CPU_APR]

	/* Clear GICH_HCR */
	str	wzr, [x2, #GICH_HCR]

	/* Save list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
1:	ldr	w5, [x2], #4
	str	w5, [x3], #4
	sub	w4, w4, #1
	cbnz	w4, 1b
2:
.endm
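
/*
 * The list-register loop above, sketched in C (illustrative only:
 * 'vctrl' stands for the hyp-mapped GICH interface base, and
 * readl_relaxed() for the plain 32-bit loads the assembly performs):
 *
 *	for (int i = 0; i < vgic_cpu->nr_lr; i++)
 *		vgic_cpu->vgic_lr[i] = readl_relaxed(vctrl + GICH_LR0 + 4 * i);
 */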
/*
 * Restore the VGIC CPU state from memory
 * x0: Register pointing to VCPU struct
 */
.macro restore_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va	x2
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* We only restore a minimal set of registers */
	ldr	w4, [x3, #VGIC_CPU_HCR]
	ldr	w5, [x3, #VGIC_CPU_VMCR]
	ldr	w6, [x3, #VGIC_CPU_APR]

	str	w4, [x2, #GICH_HCR]
	str	w5, [x2, #GICH_VMCR]
	str	w6, [x2, #GICH_APR]

	/* Restore list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
1:	ldr	w5, [x3], #4
	str	w5, [x2], #4
	sub	w4, w4, #1
	cbnz	w4, 1b
2:
.endm

.macro save_timer_state
	// x0: vcpu pointer
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	mrs	x3, cntv_ctl_el0
	and	x3, x3, #3
	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic	x3, x3, #1		// Clear Enable
	msr	cntv_ctl_el0, x3

	isb

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
	// Allow physical timer/counter access for the host
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3
	msr	cnthctl_el2, x2

	// Clear cntvoff for the host
	msr	cntvoff_el2, xzr
.endm

.macro restore_timer_state
	// x0: vcpu pointer
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs	x2, cnthctl_el2
	orr	x2, x2, #1
	bic	x2, x2, #2
	msr	cnthctl_el2, x2

	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	msr	cntvoff_el2, x3
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2
	isb

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
	and	x2, x2, #3
	msr	cntv_ctl_el0, x2
1:
.endm
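
/*
 * CNTHCTL_EL2 usage in the two timer macros, as a hedged C sketch:
 * bit 0 (EL1PCTEN) gates EL1 physical counter access, bit 1 (EL1PCEN)
 * gates EL1 physical timer access (the write_/read_ helpers below are
 * illustrative, not kernel API):
 *
 *	// host:  physical counter and timer both allowed
 *	write_cnthctl_el2(read_cnthctl_el2() | 0x3);
 *	// guest: physical counter allowed, physical timer trapped
 *	write_cnthctl_el2((read_cnthctl_el2() | 0x1) & ~0x2UL);
 */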
__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

__save_fpsimd:
	save_fpsimd
	ret

__restore_fpsimd:
	restore_fpsimd
	ret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
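/*
 * The overall flow, as C-flavoured pseudo-code (a sketch only; the
 * helper names mirror the macros used below, not real kernel
 * functions):
 *
 *	u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 *	{
 *		save_host_state(vcpu->host_context);	// GP, FP, sysregs
 *		activate_traps_and_vm(vcpu);		// HCR/CPTR/VTTBR etc.
 *		restore_guest_state(&vcpu->context);	// vgic, timer, sysregs
 *		eret_to_guest();			// runs until a trap
 *		// ...the trap path re-enters at __kvm_vcpu_return...
 *		save_guest_state(&vcpu->context);
 *		deactivate_traps_and_vm(vcpu);
 *		restore_host_state(vcpu->host_context);
 *		return exception_code;			// carried in x1
 *	}
 */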
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl __save_fpsimd
	bl __save_sysregs

	activate_traps
	activate_vm

	restore_vgic_state
	restore_timer_state

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	bl __restore_sysregs
	bl __restore_fpsimd
	restore_guest_32bit_state
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl __save_fpsimd
	bl __save_sysregs
	save_guest_32bit_state

	save_timer_state
	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs
	bl __restore_fpsimd
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	tlbi	ipas2e1is, x1
	dsb	sy
	tlbi	vmalle1is
	dsb	sy
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)

ENTRY(__kvm_flush_vm_context)
	tlbi	alle1is
	ic	ialluis
	dsb	sy
	ret
ENDPROC(__kvm_flush_vm_context)

__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs

1:	adr	x0, __hyp_panic_str
	adr	x1, 2f
	ldp	x2, x3, [x1]
	sub	x0, x0, x2
	add	x0, x0, x3
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
	mrs	x6, par_el1
	mrs	x7, tpidr_el2

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
	.quad	PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)
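
/*
 * The adr/ldp/sub/add sequence above rebases the hyp-mapped address of
 * __hyp_panic_str onto the kernel linear mapping, since panic() runs
 * with the host's view of memory. As arithmetic, using the two .quad
 * constants loaded from the 2: literal pool:
 *
 *	kern_va = hyp_va - HYP_PAGE_OFFSET + PAGE_OFFSET;
 */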
__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

	.align	2

ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:				// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_EL2_EC_SHIFT

	cmp	x2, #ESR_EL2_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */
	cmp	x2, #ESR_EL2_EC_DABT
	mov	x0, #ESR_EL2_EC_IABT
	ccmp	x2, x0, #4, ne
	b.ne	1f		// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_EL2_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f		// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	tbnz	x3, #0, 3f	// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR

	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	x1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1
	eret
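
/*
 * The PAR_EL1 -> HPFAR_EL2 massaging above, as a hedged C sketch:
 * a successful AT result holds PA[47:12] in PAR_EL1[47:12], while
 * HPFAR_EL2 reports the faulting IPA[47:12] in bits [39:4]. Hence
 * (read_par_el1 and return_to_guest are illustrative helpers):
 *
 *	u64 par = read_par_el1();
 *	if (par & 1)				// F bit set: AT failed
 *		return_to_guest();		// the 3: path above
 *	u64 hpfar = ((par >> 12) & ((1ULL << 36) - 1)) << 4;
 *						// ubfx #12, #36 ; lsl #4
 */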
el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

	.align 11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid	// Synchronous EL2t
	ventry	el2t_irq_invalid	// IRQ EL2t
	ventry	el2t_fiq_invalid	// FIQ EL2t
	ventry	el2t_error_invalid	// Error EL2t

	ventry	el2h_sync_invalid	// Synchronous EL2h
	ventry	el2h_irq_invalid	// IRQ EL2h
	ventry	el2h_fiq_invalid	// FIQ EL2h
	ventry	el2h_error_invalid	// Error EL2h

	ventry	el1_sync		// Synchronous 64-bit EL1
	ventry	el1_irq			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error_invalid	// Error 64-bit EL1

	ventry	el1_sync		// Synchronous 32-bit EL1
	ventry	el1_irq			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error_invalid	// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

__kvm_hyp_code_end:
	.globl	__kvm_hyp_code_end

	.popsection