/* inst.h (9.4 KB) — scraped listing header; line-number residue removed. */
/******************************************************************************
 * arch/ia64/include/asm/xen/inst.h
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <asm/xen/privop.h>

/*
 * Map the generic paravirtualized entry-point names onto their Xen
 * implementations, so the common ivt/entry assembly assembles with the
 * xen_* symbols substituted.
 */
#define ia64_ivt			xen_ivt
#define DO_SAVE_MIN			XEN_DO_SAVE_MIN

#define __paravirt_switch_to		xen_switch_to
#define __paravirt_leave_syscall	xen_leave_syscall
#define __paravirt_work_processed_syscall	xen_work_processed_syscall
#define __paravirt_leave_kernel		xen_leave_kernel
#define __paravirt_pending_syscall_end	xen_work_pending_syscall_end
#define __paravirt_work_processed_syscall_target \
					xen_work_processed_syscall
/*
 * Paravirtualized reads of privileged control registers: under Xen each
 * cr.* value is mirrored at a fixed XSI_* address in the shared page,
 * so "mov reg = cr.xxx" becomes a plain ld8 from that slot.  The
 * destination register doubles as the address register, so no extra
 * clobber is needed.  The ";;" stop bit separates the address
 * materialization from the dependent load.
 */
#define MOV_FROM_IFA(reg)	\
	movl reg = XSI_IFA;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_ITIR(reg)	\
	movl reg = XSI_ITIR;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_ISR(reg)	\
	movl reg = XSI_ISR;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IHA(reg)	\
	movl reg = XSI_IHA;	\
	;;			\
	ld8 reg = [reg]

/* Predicated variant: both instructions execute only when 'pred' is set. */
#define MOV_FROM_IPSR(pred, reg)	\
(pred)	movl reg = XSI_IPSR;		\
	;;				\
(pred)	ld8 reg = [reg]

#define MOV_FROM_IIM(reg)	\
	movl reg = XSI_IIM;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IIP(reg)	\
	movl reg = XSI_IIP;	\
	;;			\
	ld8 reg = [reg]
/*
 * Read cr.ivr via the XEN_HYPER_GET_IVR hypercall, which returns its
 * result in r8.  The .ifc special cases avoid redundant saves of r8
 * when the destination register (or the designated clobber) is r8
 * itself; otherwise the caller's r8 is preserved in \clob across the
 * hypercall.
 */
.macro __MOV_FROM_IVR reg, clob
	.ifc "\reg", "r8"
	/* the caller wants the result in r8 anyway */
	XEN_HYPER_GET_IVR
	.exitm
	.endif

	.ifc "\clob", "r8"
	/* r8 is declared clobberable; just copy the result out */
	XEN_HYPER_GET_IVR
	;;
	mov \reg = r8
	.exitm
	.endif

	/* general case: save r8 in \clob, hypercall, copy result, restore r8 */
	mov \clob = r8
	;;
	XEN_HYPER_GET_IVR
	;;
	mov \reg = r8
	;;
	mov r8 = \clob
.endm
#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob
/*
 * Read psr via the XEN_HYPER_GET_PSR hypercall (result in r8), fully
 * predicated on \pred.  Same r8 save/restore scheme as __MOV_FROM_IVR:
 * the .ifc cases skip the shuffle when \reg or \clob already is r8.
 */
.macro __MOV_FROM_PSR pred, reg, clob
	.ifc "\reg", "r8"
	/* result wanted in r8 directly */
	(\pred)	XEN_HYPER_GET_PSR;
	.exitm
	.endif

	.ifc "\clob", "r8"
	/* r8 is clobberable; copy the result into \reg */
	(\pred)	XEN_HYPER_GET_PSR
	;;
	(\pred)	mov \reg = r8
	.exitm
	.endif

	/* general case: preserve the caller's r8 in \clob */
	(\pred)	mov \clob = r8
	(\pred)	XEN_HYPER_GET_PSR
	;;
	(\pred)	mov \reg = r8
	(\pred)	mov r8 = \clob
.endm
#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
  98. #define MOV_TO_IFA(reg, clob) \
  99. movl clob = XSI_IFA; \
  100. ;; \
  101. st8 [clob] = reg \
/*
 * Paravirtualized writes of privileged control registers: store 'reg'
 * into the corresponding XSI_* slot.  'clob' is clobbered to hold the
 * slot address.  In the predicated variants the address materialization
 * and the store execute only when 'pred' is set; the trailing ";;" on
 * IPSR/IFS orders the store before whatever follows the macro.
 */
#define MOV_TO_ITIR(pred, reg, clob)	\
(pred)	movl clob = XSI_ITIR;		\
	;;				\
(pred)	st8 [clob] = reg

#define MOV_TO_IHA(pred, reg, clob)	\
(pred)	movl clob = XSI_IHA;		\
	;;				\
(pred)	st8 [clob] = reg

#define MOV_TO_IPSR(pred, reg, clob)	\
(pred)	movl clob = XSI_IPSR;		\
	;;				\
(pred)	st8 [clob] = reg;		\
	;;

#define MOV_TO_IFS(pred, reg, clob)	\
(pred)	movl clob = XSI_IFS;		\
	;;				\
(pred)	st8 [clob] = reg;		\
	;;

#define MOV_TO_IIP(reg, clob)	\
	movl clob = XSI_IIP;	\
	;;			\
	st8 [clob] = reg
/*
 * Write a kernel register (ar.kN) via the XEN_HYPER_SET_KR hypercall,
 * which takes the register index in r8 and the new value in r9.
 * ____MOV_TO_KR stages the operands, saving the caller's r8 in \clob0
 * and r9 in \clob1 when they are live, and restores them afterwards.
 * It therefore requires \clob0 != r9 and \clob1 != r8 (checked with
 * .error); __MOV_TO_KR swaps the two clobbers when the caller passed
 * them the other way around, so any distinct pair works.
 */
.macro ____MOV_TO_KR kr, reg, clob0, clob1
	.ifc "\clob0", "r9"
	.error "clob0 \clob0 must not be r9"
	.endif
	.ifc "\clob1", "r8"
	.error "clob1 \clob1 must not be r8"
	.endif

	/* stage the value in r9, preserving the old r9 unless it is dead */
	.ifnc "\reg", "r9"
	.ifnc "\clob1", "r9"
	mov \clob1 = r9
	.endif
	mov r9 = \reg
	.endif

	/* stage the register index in r8, preserving the old r8 likewise */
	.ifnc "\clob0", "r8"
	mov \clob0 = r8
	.endif
	mov r8 = \kr
	;;
	XEN_HYPER_SET_KR

	/* undo the saves performed above, in the same conditions */
	.ifnc "\reg", "r9"
	.ifnc "\clob1", "r9"
	mov r9 = \clob1
	.endif
	.endif
	.ifnc "\clob0", "r8"
	mov r8 = \clob0
	.endif
.endm

.macro __MOV_TO_KR kr, reg, clob0, clob1
	/* swap clobbers if they violate ____MOV_TO_KR's constraints */
	.ifc "\clob0", "r9"
	____MOV_TO_KR \kr, \reg, \clob1, \clob0
	.exitm
	.endif
	.ifc "\clob1", "r8"
	____MOV_TO_KR \kr, \reg, \clob1, \clob0
	.exitm
	.endif

	____MOV_TO_KR \kr, \reg, \clob0, \clob1
.endm
#define MOV_TO_KR(kr, reg, clob0, clob1) \
	__MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
/*
 * itc.i (insert instruction-TLB entry) via the XEN_HYPER_ITC_I
 * hypercall, which takes the PTE value in r8.  Every instruction is
 * predicated on \pred.  The .ifc cases skip saving r8 in \clob when
 * the source already is r8 or r8 is declared clobberable.
 */
.macro __ITC_I pred, reg, clob
	.ifc "\reg", "r8"
	/* PTE already in r8 */
	(\pred)	XEN_HYPER_ITC_I
	.exitm
	.endif

	.ifc "\clob", "r8"
	/* r8 is clobberable; just stage the PTE */
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_I
	.exitm
	.endif

	/* general case: preserve the caller's r8 in \clob */
	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_I
	;;
	(\pred)	mov r8 = \clob
	;;
.endm
#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob
/*
 * itc.d (insert data-TLB entry) via the XEN_HYPER_ITC_D hypercall,
 * PTE value in r8.  Structure mirrors __ITC_I; the extra ";;" after
 * each hypercall orders it before any following memory access.
 */
.macro __ITC_D pred, reg, clob
	.ifc "\reg", "r8"
	/* PTE already in r8 */
	(\pred)	XEN_HYPER_ITC_D
	;;
	.exitm
	.endif

	.ifc "\clob", "r8"
	/* r8 is clobberable; just stage the PTE */
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_D
	;;
	.exitm
	.endif

	/* general case: preserve the caller's r8 in \clob */
	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_D
	;;
	(\pred)	mov r8 = \clob
	;;
.endm
#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob
/*
 * Insert the same PTE (in \reg) into the instruction and/or data TLB,
 * each hypercall under its own predicate.  Unlike __ITC_I/__ITC_D the
 * r8 staging moves are unpredicated — r8 is set up (and \clob saved /
 * restored) regardless of which of the two inserts actually fires.
 */
.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
	.ifc "\reg", "r8"
	/* PTE already in r8 */
	(\pred_i)XEN_HYPER_ITC_I
	;;
	(\pred_d)XEN_HYPER_ITC_D
	;;
	.exitm
	.endif

	.ifc "\clob", "r8"
	/* r8 is clobberable; just stage the PTE */
	mov r8 = \reg
	;;
	(\pred_i)XEN_HYPER_ITC_I
	;;
	(\pred_d)XEN_HYPER_ITC_D
	;;
	.exitm
	.endif

	/* general case: preserve the caller's r8 in \clob */
	mov \clob = r8
	mov r8 = \reg
	;;
	(\pred_i)XEN_HYPER_ITC_I
	;;
	(\pred_d)XEN_HYPER_ITC_D
	;;
	mov r8 = \clob
	;;
.endm
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
	__ITC_I_AND_D pred_i, pred_d, reg, clob
  236. .macro __THASH pred, reg0, reg1, clob
  237. .ifc "\reg0", "r8"
  238. (\pred) mov r8 = \reg1
  239. (\pred) XEN_HYPER_THASH
  240. .exitm
  241. .endc
  242. .ifc "\reg1", "r8"
  243. (\pred) XEN_HYPER_THASH
  244. ;;
  245. (\pred) mov \reg0 = r8
  246. ;;
  247. .exitm
  248. .endif
  249. .ifc "\clob", "r8"
  250. (\pred) mov r8 = \reg1
  251. (\pred) XEN_HYPER_THASH
  252. ;;
  253. (\pred) mov \reg0 = r8
  254. ;;
  255. .exitm
  256. .endif
  257. (\pred) mov \clob = r8
  258. (\pred) mov r8 = \reg1
  259. (\pred) XEN_HYPER_THASH
  260. ;;
  261. (\pred) mov \reg0 = r8
  262. (\pred) mov r8 = \clob
  263. ;;
  264. .endm
  265. #define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
/*
 * Set/clear vpsr.ic by writing the XSI_PSR_IC word in the shared page
 * (1 = interruption collection on, 0 = off).  Both clobbers are
 * trashed.  NOTE(review): no explicit srlz is emitted here despite the
 * _SRLZ_I/_SRLZ_D names — presumably the store to the shared page is
 * sufficient under Xen; confirm against the hypervisor interface.
 */
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)	\
	mov clob0 = 1;						\
	movl clob1 = XSI_PSR_IC;				\
	;;							\
	st4 [clob1] = clob0					\
	;;

#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
	;;					\
	srlz.d;					\
	mov clob1 = 1;				\
	movl clob0 = XSI_PSR_IC;		\
	;;					\
	st4 [clob0] = clob1

/* vpsr.ic = 0: store zero (r0) into the XSI_PSR_IC word */
#define RSM_PSR_IC(clob)	\
	movl clob = XSI_PSR_IC;	\
	;;			\
	st4 [clob] = r0;	\
	;;
/*
 * Virtual "ssm psr.i": unmask event delivery and, if an event is
 * already pending, deliver it with a real hypercall.
 * XSI_PSR_I_ADDR holds the address of the vcpu's evtchn_upcall_mask
 * byte; evtchn_upcall_pending lives MASK_TO_PEND_OFS bytes before it.
 * pred_clob (the predicate register) and clob are both clobbered.
 */
#define MASK_TO_PEND_OFS (-1)
#define SSM_PSR_I(pred, pred_clob, clob)	\
(pred)	movl clob = XSI_PSR_I_ADDR		\
	;;					\
(pred)	ld8 clob = [clob]			\
	;;					\
	/* if (pred) vpsr.i = 1 */		\
	/* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */	\
(pred)	st1 [clob] = r0, MASK_TO_PEND_OFS	\
	;;					\
	/* if (vcpu->vcpu_info->evtchn_upcall_pending) */	\
(pred)	ld1 clob = [clob]			\
	;;					\
(pred)	cmp.ne.unc pred_clob, p0 = clob, r0	\
	;;					\
(pred_clob)XEN_HYPER_SSM_I	/* do a real ssm psr.i */
/*
 * Virtual "rsm psr.i": if 'pred', mask event delivery by storing 1
 * into the evtchn_upcall_mask byte (address read from XSI_PSR_I_ADDR).
 * The address load and constant setup run unpredicated; only the final
 * store is conditional.  clob0 and clob1 are both clobbered.
 */
#define RSM_PSR_I(pred, clob0, clob1)	\
	movl clob0 = XSI_PSR_I_ADDR;	\
	mov clob1 = 1;			\
	;;				\
	ld8 clob0 = [clob0];		\
	;;				\
(pred)	st1 [clob0] = clob1
/*
 * Mask events AND clear vpsr.ic in one sequence: write 1 to the
 * evtchn_upcall_mask byte (address from XSI_PSR_I_ADDR) and 0 to the
 * XSI_PSR_IC word.  All three clobbers are trashed.
 */
#define RSM_PSR_I_IC(clob0, clob1, clob2)	\
	movl clob0 = XSI_PSR_I_ADDR;		\
	movl clob1 = XSI_PSR_IC;		\
	;;					\
	ld8 clob0 = [clob0];			\
	mov clob2 = 1;				\
	;;					\
	/* note: clears both vpsr.i and vpsr.ic! */	\
	st1 [clob0] = clob2;			\
	st4 [clob1] = r0;			\
	;;
/* rsm psr.dt: turn data translation off via hypercall */
#define RSM_PSR_DT		\
	XEN_HYPER_RSM_PSR_DT

/*
 * ssm psr.dt (+ the srlz.i of the native sequence) as a single
 * hypercall.  NOTE(review): no explicit srlz.i is emitted here —
 * presumably the hypercall covers it; confirm against the Xen/ia64
 * interface.
 */
#define SSM_PSR_DT_AND_SRLZ_I	\
	XEN_HYPER_SSM_PSR_DT
/*
 * Switch to register bank 0.  On entry r16-r31 hold the bank-1 values;
 * spill them (st8.spill preserves the NaT bits via ar.unat) into the
 * XSI_BANK1_R16 save area through two interleaved pointers, save the
 * accumulated NaT bits into XSI_B1NAT, restore the caller's ar.unat
 * (saved in clob2 at the top), and finally store 0 to XSI_BANKNUM to
 * record that bank 0 is active.  clob0/clob1/clob2 are all clobbered.
 */
#define BSW_0(clob0, clob1, clob2)	\
	;;	\
	/* r16-r31 all now hold bank1 values */	\
	mov clob2 = ar.unat;	\
	movl clob0 = XSI_BANK1_R16;	\
	movl clob1 = XSI_BANK1_R16 + 8;	\
	;;	\
	.mem.offset 0, 0; st8.spill [clob0] = r16, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r17, 16;	\
	;;	\
	.mem.offset 0, 0; st8.spill [clob0] = r18, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r19, 16;	\
	;;	\
	.mem.offset 0, 0; st8.spill [clob0] = r20, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r21, 16;	\
	;;	\
	.mem.offset 0, 0; st8.spill [clob0] = r22, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r23, 16;	\
	;;	\
	.mem.offset 0, 0; st8.spill [clob0] = r24, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r25, 16;	\
	;;	\
	.mem.offset 0, 0; st8.spill [clob0] = r26, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r27, 16;	\
	;;	\
	.mem.offset 0, 0; st8.spill [clob0] = r28, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r29, 16;	\
	;;	\
	.mem.offset 0, 0; st8.spill [clob0] = r30, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r31, 16;	\
	;;	\
	/* NaT bits collected by the spills are now in ar.unat */	\
	mov clob1 = ar.unat;	\
	movl clob0 = XSI_B1NAT;	\
	;;	\
	st8 [clob0] = clob1;	\
	mov ar.unat = clob2;	\
	movl clob0 = XSI_BANKNUM;	\
	;;	\
	st4 [clob0] = r0
/* FIXME: THIS CODE IS NOT NaT SAFE! */
/*
 * Switch back to register bank 1: load the saved NaT bits from
 * XSI_B1NAT into ar.unat (so ld8.fill regenerates them), store 1 to
 * XSI_BANKNUM, then refill r16-r31 from the XSI_BANK1_R16 save area
 * through two interleaved pointers.  r30/r31 serve as those pointers
 * and are themselves filled last.  The caller's ar.unat, saved in
 * 'clob' at the top, is restored at the end; BSW_1 drops its first
 * clobber because only one is needed here.
 */
#define XEN_BSW_1(clob)			\
	mov clob = ar.unat;		\
	movl r30 = XSI_B1NAT;		\
	;;				\
	ld8 r30 = [r30];		\
	mov r31 = 1;			\
	;;				\
	mov ar.unat = r30;		\
	movl r30 = XSI_BANKNUM;		\
	;;				\
	st4 [r30] = r31;		\
	movl r30 = XSI_BANK1_R16;	\
	movl r31 = XSI_BANK1_R16+8;	\
	;;				\
	ld8.fill r16 = [r30], 16;	\
	ld8.fill r17 = [r31], 16;	\
	;;				\
	ld8.fill r18 = [r30], 16;	\
	ld8.fill r19 = [r31], 16;	\
	;;				\
	ld8.fill r20 = [r30], 16;	\
	ld8.fill r21 = [r31], 16;	\
	;;				\
	ld8.fill r22 = [r30], 16;	\
	ld8.fill r23 = [r31], 16;	\
	;;				\
	ld8.fill r24 = [r30], 16;	\
	ld8.fill r25 = [r31], 16;	\
	;;				\
	ld8.fill r26 = [r30], 16;	\
	ld8.fill r27 = [r31], 16;	\
	;;				\
	ld8.fill r28 = [r30], 16;	\
	ld8.fill r29 = [r31], 16;	\
	;;				\
	/* the pointer registers are filled last */	\
	ld8.fill r30 = [r30];		\
	ld8.fill r31 = [r31];		\
	;;				\
	mov ar.unat = clob
#define BSW_1(clob0, clob1) XEN_BSW_1(clob1)
/* cover via hypercall */
#define COVER	\
	XEN_HYPER_COVER

/*
 * rfi via hypercall.  NOTE(review): the trailing dv_serialize_data
 * presumably guards against a data-dependency hazard in the code
 * following the return — confirm against the ia64 errata/ABI notes.
 */
#define RFI	\
	XEN_HYPER_RFI;	\
	dv_serialize_data