/* kvm_minstate.h */
/*
 * kvm_minstate.h: min save macros
 * Copyright (c) 2007, Intel Corporation.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/types.h>

#include "asm-offsets.h"
  26. #define KVM_MINSTATE_START_SAVE_MIN \
  27. mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
  28. ;; \
  29. mov.m r28 = ar.rnat; \
  30. addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \
  31. ;; \
  32. lfetch.fault.excl.nt1 [r22]; \
  33. addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
  34. mov r23 = ar.bspstore; /* save ar.bspstore */ \
  35. ;; \
  36. mov ar.bspstore = r22; /* switch to kernel RBS */\
  37. ;; \
  38. mov r18 = ar.bsp; \
  39. mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
  40. #define KVM_MINSTATE_END_SAVE_MIN \
  41. bsw.1; /* switch back to bank 1 (must be last in insn group) */\
  42. ;;
  43. #define PAL_VSA_SYNC_READ \
  44. /* begin to call pal vps sync_read */ \
  45. {.mii; \
  46. add r25 = VMM_VPD_BASE_OFFSET, r21; \
  47. nop 0x0; \
  48. mov r24=ip; \
  49. ;; \
  50. } \
  51. {.mmb \
  52. add r24=0x20, r24; \
  53. ld8 r25 = [r25]; /* read vpd base */ \
  54. br.cond.sptk kvm_vps_sync_read; /*call the service*/ \
  55. ;; \
  56. }; \
  57. #define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
/*
 * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
 * the minimum state necessary that allows us to turn psr.ic back
 * on.
 *
 * Assumed state upon entry:
 *	psr.ic: off
 *	r31:	contains saved predicates (pr)
 *
 * Upon exit, the state is as follows:
 *	psr.ic: off
 *	r2 = points to &pt_regs.r16
 *	r8 = contents of ar.ccv
 *	r9 = contents of ar.csd
 *	r10 = contents of ar.ssd
 *	r11 = FPSR_DEFAULT
 *	r12 = kernel sp (kernel virtual address)
 *	r13 = points to current task_struct (kernel virtual address)
 *	p15 = TRUE if psr.i is set in cr.ipsr
 *	predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
 *		preserved
 *
 * Note that psr.ic is NOT turned on by this macro.  This is so that
 * we can pass interruption state as arguments to a handler.
 */
  83. #define PT(f) (VMM_PT_REGS_##f##_OFFSET)
  84. #define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
  85. KVM_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
  86. mov r27 = ar.rsc; /* M */ \
  87. mov r20 = r1; /* A */ \
  88. mov r25 = ar.unat; /* M */ \
  89. mov r29 = cr.ipsr; /* M */ \
  90. mov r26 = ar.pfs; /* I */ \
  91. mov r18 = cr.isr; \
  92. COVER; /* B;; (or nothing) */ \
  93. ;; \
  94. tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \
  95. mov r1 = r16; \
  96. /* mov r21=r16; */ \
  97. /* switch from user to kernel RBS: */ \
  98. ;; \
  99. invala; /* M */ \
  100. SAVE_IFS; \
  101. ;; \
  102. KVM_MINSTATE_START_SAVE_MIN \
  103. adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \
  104. adds r16 = PT(CR_IPSR),r1; \
  105. ;; \
  106. lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
  107. st8 [r16] = r29; /* save cr.ipsr */ \
  108. ;; \
  109. lfetch.fault.excl.nt1 [r17]; \
  110. tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \
  111. mov r29 = b0 \
  112. ;; \
  113. adds r16 = PT(R8),r1; /* initialize first base pointer */\
  114. adds r17 = PT(R9),r1; /* initialize second base pointer */\
  115. ;; \
  116. .mem.offset 0,0; st8.spill [r16] = r8,16; \
  117. .mem.offset 8,0; st8.spill [r17] = r9,16; \
  118. ;; \
  119. .mem.offset 0,0; st8.spill [r16] = r10,24; \
  120. .mem.offset 8,0; st8.spill [r17] = r11,24; \
  121. ;; \
  122. mov r9 = cr.iip; /* M */ \
  123. mov r10 = ar.fpsr; /* M */ \
  124. ;; \
  125. st8 [r16] = r9,16; /* save cr.iip */ \
  126. st8 [r17] = r30,16; /* save cr.ifs */ \
  127. sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \
  128. ;; \
  129. st8 [r16] = r25,16; /* save ar.unat */ \
  130. st8 [r17] = r26,16; /* save ar.pfs */ \
  131. shl r18 = r18,16; /* calu ar.rsc used for "loadrs" */\
  132. ;; \
  133. st8 [r16] = r27,16; /* save ar.rsc */ \
  134. st8 [r17] = r28,16; /* save ar.rnat */ \
  135. ;; /* avoid RAW on r16 & r17 */ \
  136. st8 [r16] = r23,16; /* save ar.bspstore */ \
  137. st8 [r17] = r31,16; /* save predicates */ \
  138. ;; \
  139. st8 [r16] = r29,16; /* save b0 */ \
  140. st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\
  141. ;; \
  142. .mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \
  143. .mem.offset 8,0; st8.spill [r17] = r12,16; \
  144. adds r12 = -16,r1; /* switch to kernel memory stack */ \
  145. ;; \
  146. .mem.offset 0,0; st8.spill [r16] = r13,16; \
  147. .mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\
  148. mov r13 = r21; /* establish `current' */ \
  149. ;; \
  150. .mem.offset 0,0; st8.spill [r16] = r15,16; \
  151. .mem.offset 8,0; st8.spill [r17] = r14,16; \
  152. ;; \
  153. .mem.offset 0,0; st8.spill [r16] = r2,16; \
  154. .mem.offset 8,0; st8.spill [r17] = r3,16; \
  155. adds r2 = VMM_PT_REGS_R16_OFFSET,r1; \
  156. ;; \
  157. adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \
  158. adds r17 = VMM_VCPU_ISR_OFFSET,r13; \
  159. mov r26 = cr.iipa; \
  160. mov r27 = cr.isr; \
  161. ;; \
  162. st8 [r16] = r26; \
  163. st8 [r17] = r27; \
  164. ;; \
  165. EXTRA; \
  166. mov r8 = ar.ccv; \
  167. mov r9 = ar.csd; \
  168. mov r10 = ar.ssd; \
  169. movl r11 = FPSR_DEFAULT; /* L-unit */ \
  170. adds r17 = VMM_VCPU_GP_OFFSET,r13; \
  171. ;; \
  172. ld8 r1 = [r17];/* establish kernel global pointer */ \
  173. ;; \
  174. PAL_VSA_SYNC_READ \
  175. KVM_MINSTATE_END_SAVE_MIN
/*
 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
 *
 * Assumed state upon entry:
 *	psr.ic: on
 *	r2:	points to &pt_regs.f6
 *	r3:	points to &pt_regs.f7
 *	r8:	contents of ar.ccv
 *	r9:	contents of ar.csd
 *	r10:	contents of ar.ssd
 *	r11:	FPSR_DEFAULT
 *
 * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
 */
  190. #define KVM_SAVE_REST \
  191. .mem.offset 0,0; st8.spill [r2] = r16,16; \
  192. .mem.offset 8,0; st8.spill [r3] = r17,16; \
  193. ;; \
  194. .mem.offset 0,0; st8.spill [r2] = r18,16; \
  195. .mem.offset 8,0; st8.spill [r3] = r19,16; \
  196. ;; \
  197. .mem.offset 0,0; st8.spill [r2] = r20,16; \
  198. .mem.offset 8,0; st8.spill [r3] = r21,16; \
  199. mov r18=b6; \
  200. ;; \
  201. .mem.offset 0,0; st8.spill [r2] = r22,16; \
  202. .mem.offset 8,0; st8.spill [r3] = r23,16; \
  203. mov r19 = b7; \
  204. ;; \
  205. .mem.offset 0,0; st8.spill [r2] = r24,16; \
  206. .mem.offset 8,0; st8.spill [r3] = r25,16; \
  207. ;; \
  208. .mem.offset 0,0; st8.spill [r2] = r26,16; \
  209. .mem.offset 8,0; st8.spill [r3] = r27,16; \
  210. ;; \
  211. .mem.offset 0,0; st8.spill [r2] = r28,16; \
  212. .mem.offset 8,0; st8.spill [r3] = r29,16; \
  213. ;; \
  214. .mem.offset 0,0; st8.spill [r2] = r30,16; \
  215. .mem.offset 8,0; st8.spill [r3] = r31,32; \
  216. ;; \
  217. mov ar.fpsr = r11; \
  218. st8 [r2] = r8,8; \
  219. adds r24 = PT(B6)-PT(F7),r3; \
  220. adds r25 = PT(B7)-PT(F7),r3; \
  221. ;; \
  222. st8 [r24] = r18,16; /* b6 */ \
  223. st8 [r25] = r19,16; /* b7 */ \
  224. adds r2 = PT(R4)-PT(F6),r2; \
  225. adds r3 = PT(R5)-PT(F7),r3; \
  226. ;; \
  227. st8 [r24] = r9; /* ar.csd */ \
  228. st8 [r25] = r10; /* ar.ssd */ \
  229. ;; \
  230. mov r18 = ar.unat; \
  231. adds r19 = PT(EML_UNAT)-PT(R4),r2; \
  232. ;; \
  233. st8 [r19] = r18; /* eml_unat */ \
  234. #define KVM_SAVE_EXTRA \
  235. .mem.offset 0,0; st8.spill [r2] = r4,16; \
  236. .mem.offset 8,0; st8.spill [r3] = r5,16; \
  237. ;; \
  238. .mem.offset 0,0; st8.spill [r2] = r6,16; \
  239. .mem.offset 8,0; st8.spill [r3] = r7; \
  240. ;; \
  241. mov r26 = ar.unat; \
  242. ;; \
  243. st8 [r2] = r26;/* eml_unat */ \
  244. #define KVM_SAVE_MIN_WITH_COVER KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
  245. #define KVM_SAVE_MIN_WITH_COVER_R19 KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
  246. #define KVM_SAVE_MIN KVM_DO_SAVE_MIN( , mov r30 = r0, )