/* kvm_minstate.h */
/*
 * kvm_minstate.h: min save macros
 * Copyright (c) 2007, Intel Corporation.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <asm/asmmacro.h>
#include <asm/types.h>
#include <asm/kregs.h>

#include "asm-offsets.h"
/*
 * Switch the RSE from the guest backing store to the VMM backing store.
 * Leaves for the caller (KVM_DO_SAVE_MIN):
 *   r18 = ar.bsp after the switch (later used to derive RSE.ndirty),
 *   r22 = base of the new (VMM) RBS,
 *   r23 = saved guest ar.bspstore,
 *   r28 = saved guest ar.rnat,
 *   r1  = base of the VMM memory stack minus pt_regs.
 * NOTE(review): on entry r1 appears to hold the per-vcpu base (it is set
 * from r21/current in KVM_DO_SAVE_MIN before this macro is expanded).
 */
#define KVM_MINSTATE_START_SAVE_MIN \
	mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
	;; \
	mov.m r28 = ar.rnat; \
	addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \
	;; \
	lfetch.fault.excl.nt1 [r22]; \
	addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
	mov r23 = ar.bspstore; /* save ar.bspstore */ \
	;; \
	mov ar.bspstore = r22; /* switch to kernel RBS */\
	;; \
	mov r18 = ar.bsp; \
	mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
/* Finish the min-state save: make the bank-1 registers (r16-r31) current again. */
#define KVM_MINSTATE_END_SAVE_MIN \
	bsw.1; /* switch back to bank 1 (must be last in insn group) */\
	;;
/*
 * Invoke the PAL virtualization service PAL_VPS_SYNC_READ.
 * In:  r21 = vcpu base (supplies the VPD base and the VSA service base).
 * The return address passed in r24 is ip+0x20, i.e. the bundle following
 * the two explicit bundles below, so the service branches straight back
 * here; clobbers r20, r24, r25 and b0.
 */
#define PAL_VSA_SYNC_READ \
	/* begin to call pal vps sync_read */ \
	add r25 = VMM_VPD_BASE_OFFSET, r21; \
	adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21; /* entry point */ \
	;; \
	ld8 r25 = [r25]; /* read vpd base */ \
	ld8 r20 = [r20]; \
	;; \
	add r20 = PAL_VPS_SYNC_READ,r20; \
	;; \
	{ .mii; \
	nop 0x0; \
	mov r24 = ip; \
	mov b0 = r20; \
	;; \
	}; \
	{ .mmb; \
	add r24 = 0x20, r24; /* return address = next bundle */ \
	nop 0x0; \
	br.cond.sptk b0; /* call the service */ \
	;; \
	};
  65. #define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
/*
 * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
 * the minimum state necessary that allows us to turn psr.ic back
 * on.
 *
 * Assumed state upon entry:
 *	psr.ic: off
 *	r31:	contains saved predicates (pr)
 *
 * Upon exit, the state is as follows:
 *	psr.ic: off
 *	r2 = points to &pt_regs.r16
 *	r8 = contents of ar.ccv
 *	r9 = contents of ar.csd
 *	r10 = contents of ar.ssd
 *	r11 = FPSR_DEFAULT
 *	r12 = kernel sp (kernel virtual address)
 *	r13 = points to current task_struct (kernel virtual address)
 *	p15 = TRUE if psr.i is set in cr.ipsr
 *	predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
 *		preserved
 *
 * Note that psr.ic is NOT turned on by this macro.  This is so that
 * we can pass interruption state as arguments to a handler.
 */
  91. #define PT(f) (VMM_PT_REGS_##f##_OFFSET)
/*
 * Save the minimal interruption state into pt_regs (see the contract in
 * the block comment above).  COVER is "cover" or empty; SAVE_IFS captures
 * cr.ifs (or r0) into r30; EXTRA is an optional extra instruction.
 */
#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
	KVM_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
	mov r27 = ar.rsc; /* M */ \
	mov r20 = r1; /* A */ \
	mov r25 = ar.unat; /* M */ \
	mov r29 = cr.ipsr; /* M */ \
	mov r26 = ar.pfs; /* I */ \
	mov r18 = cr.isr; \
	COVER; /* B;; (or nothing) */ \
	;; \
	tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \
	mov r1 = r16; \
/*	mov r21=r16; */ \
	/* switch from user to kernel RBS: */ \
	;; \
	invala; /* M */ \
	SAVE_IFS; \
	;; \
	KVM_MINSTATE_START_SAVE_MIN \
	adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \
	adds r16 = PT(CR_IPSR),r1; \
	;; \
	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
	st8 [r16] = r29; /* save cr.ipsr */ \
	;; \
	lfetch.fault.excl.nt1 [r17]; \
	tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \
	mov r29 = b0 \
	;; \
	adds r16 = PT(R8),r1; /* initialize first base pointer */\
	adds r17 = PT(R9),r1; /* initialize second base pointer */\
	;; \
	.mem.offset 0,0; st8.spill [r16] = r8,16; \
	.mem.offset 8,0; st8.spill [r17] = r9,16; \
	;; \
	.mem.offset 0,0; st8.spill [r16] = r10,24; \
	.mem.offset 8,0; st8.spill [r17] = r11,24; \
	;; \
	mov r9 = cr.iip; /* M */ \
	mov r10 = ar.fpsr; /* M */ \
	;; \
	st8 [r16] = r9,16; /* save cr.iip */ \
	st8 [r17] = r30,16; /* save cr.ifs */ \
	sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \
	;; \
	st8 [r16] = r25,16; /* save ar.unat */ \
	st8 [r17] = r26,16; /* save ar.pfs */ \
	shl r18 = r18,16; /* calu ar.rsc used for "loadrs" */\
	;; \
	st8 [r16] = r27,16; /* save ar.rsc */ \
	st8 [r17] = r28,16; /* save ar.rnat */ \
	;; /* avoid RAW on r16 & r17 */ \
	st8 [r16] = r23,16; /* save ar.bspstore */ \
	st8 [r17] = r31,16; /* save predicates */ \
	;; \
	st8 [r16] = r29,16; /* save b0 */ \
	st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\
	;; \
	.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \
	.mem.offset 8,0; st8.spill [r17] = r12,16; \
	adds r12 = -16,r1; /* switch to kernel memory stack */ \
	;; \
	.mem.offset 0,0; st8.spill [r16] = r13,16; \
	.mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\
	mov r13 = r21; /* establish `current' */ \
	;; \
	.mem.offset 0,0; st8.spill [r16] = r15,16; \
	.mem.offset 8,0; st8.spill [r17] = r14,16; \
	;; \
	.mem.offset 0,0; st8.spill [r16] = r2,16; \
	.mem.offset 8,0; st8.spill [r17] = r3,16; \
	adds r2 = VMM_PT_REGS_R16_OFFSET,r1; /* exit: r2 = &pt_regs.r16 */ \
	;; \
	adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \
	adds r17 = VMM_VCPU_ISR_OFFSET,r13; \
	mov r26 = cr.iipa; \
	mov r27 = cr.isr; \
	;; \
	st8 [r16] = r26; /* save cr.iipa into the vcpu */ \
	st8 [r17] = r27; /* save cr.isr into the vcpu */ \
	;; \
	EXTRA; \
	mov r8 = ar.ccv; \
	mov r9 = ar.csd; \
	mov r10 = ar.ssd; \
	movl r11 = FPSR_DEFAULT; /* L-unit */ \
	adds r17 = VMM_VCPU_GP_OFFSET,r13; \
	;; \
	ld8 r1 = [r17];/* establish kernel global pointer */ \
	;; \
	PAL_VSA_SYNC_READ \
	KVM_MINSTATE_END_SAVE_MIN
/*
 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
 *
 * Assumed state upon entry:
 *	psr.ic: on
 *	r2:	points to &pt_regs.f6
 *	r3:	points to &pt_regs.f7
 *	r8:	contents of ar.ccv
 *	r9:	contents of ar.csd
 *	r10:	contents of ar.ssd
 *	r11:	FPSR_DEFAULT
 *
 * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
 */
  198. #define KVM_SAVE_REST \
  199. .mem.offset 0,0; st8.spill [r2] = r16,16; \
  200. .mem.offset 8,0; st8.spill [r3] = r17,16; \
  201. ;; \
  202. .mem.offset 0,0; st8.spill [r2] = r18,16; \
  203. .mem.offset 8,0; st8.spill [r3] = r19,16; \
  204. ;; \
  205. .mem.offset 0,0; st8.spill [r2] = r20,16; \
  206. .mem.offset 8,0; st8.spill [r3] = r21,16; \
  207. mov r18=b6; \
  208. ;; \
  209. .mem.offset 0,0; st8.spill [r2] = r22,16; \
  210. .mem.offset 8,0; st8.spill [r3] = r23,16; \
  211. mov r19 = b7; \
  212. ;; \
  213. .mem.offset 0,0; st8.spill [r2] = r24,16; \
  214. .mem.offset 8,0; st8.spill [r3] = r25,16; \
  215. ;; \
  216. .mem.offset 0,0; st8.spill [r2] = r26,16; \
  217. .mem.offset 8,0; st8.spill [r3] = r27,16; \
  218. ;; \
  219. .mem.offset 0,0; st8.spill [r2] = r28,16; \
  220. .mem.offset 8,0; st8.spill [r3] = r29,16; \
  221. ;; \
  222. .mem.offset 0,0; st8.spill [r2] = r30,16; \
  223. .mem.offset 8,0; st8.spill [r3] = r31,32; \
  224. ;; \
  225. mov ar.fpsr = r11; \
  226. st8 [r2] = r8,8; \
  227. adds r24 = PT(B6)-PT(F7),r3; \
  228. adds r25 = PT(B7)-PT(F7),r3; \
  229. ;; \
  230. st8 [r24] = r18,16; /* b6 */ \
  231. st8 [r25] = r19,16; /* b7 */ \
  232. adds r2 = PT(R4)-PT(F6),r2; \
  233. adds r3 = PT(R5)-PT(F7),r3; \
  234. ;; \
  235. st8 [r24] = r9; /* ar.csd */ \
  236. st8 [r25] = r10; /* ar.ssd */ \
  237. ;; \
  238. mov r18 = ar.unat; \
  239. adds r19 = PT(EML_UNAT)-PT(R4),r2; \
  240. ;; \
  241. st8 [r19] = r18; /* eml_unat */ \
  242. #define KVM_SAVE_EXTRA \
  243. .mem.offset 0,0; st8.spill [r2] = r4,16; \
  244. .mem.offset 8,0; st8.spill [r3] = r5,16; \
  245. ;; \
  246. .mem.offset 0,0; st8.spill [r2] = r6,16; \
  247. .mem.offset 8,0; st8.spill [r3] = r7; \
  248. ;; \
  249. mov r26 = ar.unat; \
  250. ;; \
  251. st8 [r2] = r26;/* eml_unat */ \
/* Min-state save after "cover": the current frame is covered and cr.ifs (saved via r30) is valid. */
#define KVM_SAVE_MIN_WITH_COVER KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
/* Same as KVM_SAVE_MIN_WITH_COVER, additionally copying r19 into r15 for the handler. */
#define KVM_SAVE_MIN_WITH_COVER_R19 KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
/* Min-state save without "cover"; the cr.ifs slot is filled with r0 (zero). */
#define KVM_SAVE_MIN KVM_DO_SAVE_MIN( , mov r30 = r0, )