paravirt_privop.h
/******************************************************************************
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
#define _ASM_IA64_PARAVIRT_PRIVOP_H

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/kregs.h> /* for IA64_PSR_I */

/******************************************************************************
 * replacement of intrinsics operations.
 */

struct pv_cpu_ops {
	void (*fc)(void *addr);
	unsigned long (*thash)(unsigned long addr);
	unsigned long (*get_cpuid)(int index);
	unsigned long (*get_pmd)(int index);
	unsigned long (*getreg)(int reg);
	void (*setreg)(int reg, unsigned long val);
	void (*ptcga)(unsigned long addr, unsigned long size);
	unsigned long (*get_rr)(unsigned long index);
	void (*set_rr)(unsigned long index, unsigned long val);
	void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
			       unsigned long val2, unsigned long val3,
			       unsigned long val4);
	void (*ssm_i)(void);
	void (*rsm_i)(void);
	unsigned long (*get_psr_i)(void);
	void (*intrin_local_irq_restore)(unsigned long flags);
};

extern struct pv_cpu_ops pv_cpu_ops;

extern void ia64_native_setreg_func(int regnum, unsigned long val);
extern unsigned long ia64_native_getreg_func(int regnum);
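
/*
 * Illustrative only (a sketch, not part of the original header): a
 * paravirtualized flavor would override selected hooks at boot and
 * leave the rest native.  All names below are hypothetical.
 *
 *	static unsigned long my_pv_get_psr_i(void)
 *	{
 *		// consult hypervisor-shared state instead of reading psr.i
 *		return my_shared_info->psr_i_enabled ? IA64_PSR_I : 0;
 *	}
 *
 *	void __init my_pv_cpu_init(void)
 *	{
 *		pv_cpu_ops.get_psr_i = my_pv_get_psr_i;
 *	}
 */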

/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/
#ifndef ASM_SUPPORTED
#define paravirt_ssm_i()	pv_cpu_ops.ssm_i()
#define paravirt_rsm_i()	pv_cpu_ops.rsm_i()
#define __paravirt_getreg(reg)	pv_cpu_ops.getreg(reg)
#endif

/* mask for ia64_native_ssm/rsm() must be constant ("i" constraint).
 * A static inline function can't guarantee that. */
#define paravirt_ssm(mask)			\
	do {					\
		if ((mask) == IA64_PSR_I)	\
			paravirt_ssm_i();	\
		else				\
			ia64_native_ssm(mask);	\
	} while (0)

#define paravirt_rsm(mask)			\
	do {					\
		if ((mask) == IA64_PSR_I)	\
			paravirt_rsm_i();	\
		else				\
			ia64_native_rsm(mask);	\
	} while (0)
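
/*
 * Illustrative usage (a sketch): with a compile-time constant mask the
 * branch above folds away, e.g.
 *
 *	paravirt_ssm(IA64_PSR_I);	// becomes paravirt_ssm_i()
 *	paravirt_rsm(IA64_PSR_I);	// becomes paravirt_rsm_i()
 *	paravirt_ssm(IA64_PSR_DT);	// falls back to ia64_native_ssm()
 */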

/* the returned ip value should be the caller's ip,
 * not the ip inside __paravirt_getreg() */
#define paravirt_getreg(reg)						\
	({								\
		unsigned long res;					\
		if ((reg) == _IA64_REG_IP)				\
			res = ia64_native_getreg(_IA64_REG_IP);		\
		else							\
			res = __paravirt_getreg(reg);			\
		res;							\
	})
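
/*
 * Illustrative usage (a sketch): _IA64_REG_IP must be read at the call
 * site itself, so it bypasses the hook; any other register goes through
 * pv_cpu_ops.getreg, e.g.
 *
 *	unsigned long ip = paravirt_getreg(_IA64_REG_IP);  // native read
 *	unsigned long tp = paravirt_getreg(_IA64_REG_TP);  // via the hook
 */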

/******************************************************************************
 * replacement of hand-written assembly code.
 */
struct pv_cpu_asm_switch {
	unsigned long switch_to;
	unsigned long leave_syscall;
	unsigned long work_processed_syscall;
	unsigned long leave_kernel;
};
void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
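
/*
 * Illustrative only (a sketch with hypothetical symbol names): a
 * paravirtualized flavor registers its replacement entry points once
 * at boot, roughly
 *
 *	extern char my_pv_switch_to[];
 *	extern char my_pv_leave_syscall[];
 *	extern char my_pv_work_processed_syscall[];
 *	extern char my_pv_leave_kernel[];
 *
 *	static const struct pv_cpu_asm_switch my_pv_cpu_asm_switch = {
 *		.switch_to		= (unsigned long)my_pv_switch_to,
 *		.leave_syscall		= (unsigned long)my_pv_leave_syscall,
 *		.work_processed_syscall	=
 *			(unsigned long)my_pv_work_processed_syscall,
 *		.leave_kernel		= (unsigned long)my_pv_leave_kernel,
 *	};
 *
 *	paravirt_cpu_asm_init(&my_pv_cpu_asm_switch);
 */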

#endif /* __ASSEMBLY__ */

#define IA64_PARAVIRT_ASM_FUNC(name)	paravirt_ ## name

#else

/* fallback for native case */
#define IA64_PARAVIRT_ASM_FUNC(name)	ia64_native_ ## name

#endif /* CONFIG_PARAVIRT */

#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
#define paravirt_dv_serialize_data()	ia64_dv_serialize_data()
#else
#define paravirt_dv_serialize_data()	/* nothing */
#endif

/* these routines utilize privilege-sensitive or performance-sensitive
 * privileged instructions, so the code must be replaced with
 * paravirtualized versions */
#define ia64_switch_to			IA64_PARAVIRT_ASM_FUNC(switch_to)
#define ia64_leave_syscall		IA64_PARAVIRT_ASM_FUNC(leave_syscall)
#define ia64_work_processed_syscall	\
	IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
#define ia64_leave_kernel		IA64_PARAVIRT_ASM_FUNC(leave_kernel)

#if defined(CONFIG_PARAVIRT)
/******************************************************************************
 * binary patching infrastructure
 */
#define PARAVIRT_PATCH_TYPE_FC				1
#define PARAVIRT_PATCH_TYPE_THASH			2
#define PARAVIRT_PATCH_TYPE_GET_CPUID			3
#define PARAVIRT_PATCH_TYPE_GET_PMD			4
#define PARAVIRT_PATCH_TYPE_PTCGA			5
#define PARAVIRT_PATCH_TYPE_GET_RR			6
#define PARAVIRT_PATCH_TYPE_SET_RR			7
#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4		8
#define PARAVIRT_PATCH_TYPE_SSM_I			9
#define PARAVIRT_PATCH_TYPE_RSM_I			10
#define PARAVIRT_PATCH_TYPE_GET_PSR_I			11
#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE	12

/* PARAVIRT_PATCH_TYPE_[GS]ETREG + _IA64_REG_xxx */
#define PARAVIRT_PATCH_TYPE_GETREG		0x10000000
#define PARAVIRT_PATCH_TYPE_SETREG		0x20000000
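
/*
 * Illustrative only (a sketch): the register number is encoded in the
 * low bits of the patch type.  A patch site reading the thread pointer
 * would be tagged
 *
 *	PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_TP
 *
 * and the patcher recovers the register number with
 *
 *	regnum = type - PARAVIRT_PATCH_TYPE_GETREG;
 */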

/*
 * struct task_struct* (*ia64_switch_to)(void* next_task);
 * void *ia64_leave_syscall;
 * void *ia64_work_processed_syscall;
 * void *ia64_leave_kernel;
 */
#define PARAVIRT_PATCH_TYPE_BR_START			0x30000000
#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO		\
	(PARAVIRT_PATCH_TYPE_BR_START + 0)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL		\
	(PARAVIRT_PATCH_TYPE_BR_START + 1)
#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL	\
	(PARAVIRT_PATCH_TYPE_BR_START + 2)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL		\
	(PARAVIRT_PATCH_TYPE_BR_START + 3)

#ifdef ASM_SUPPORTED

#include <asm/paravirt_patch.h>

/*
 * pv_cpu_ops calling stub.
 * The normal function calling convention can't be expressed in gcc
 * inline assembly, so calls are routed through this hand-rolled stub.
 *
 * from the caller's point of view,
 * the following registers will be clobbered.
 * r2, r3
 * r8-r15
 * r16, r17
 * b6, b7
 * p6-p15
 * ar.ccv
 *
 * from the callee's point of view,
 * the following registers can be used.
 * r2, r3: scratch
 * r8: scratch, input argument0 and return value
 * r9-r15: scratch, input argument1-5
 * b6: return pointer
 * b7: scratch
 * p6-p15: scratch
 * ar.ccv: scratch
 *
 * other registers must not be changed. especially
 * b0: rp: preserved. gcc ignores b0 in the clobber list.
 * r16: saved gp
 */
/* 5 bundles */
#define __PARAVIRT_BR							\
	";;\n"								\
	"{ .mlx\n"							\
	"nop 0\n"							\
	"movl r2 = %[op_addr]\n" /* get function pointer address */	\
	";;\n"								\
	"}\n"								\
	"1:\n"								\
	"{ .mii\n"							\
	"ld8 r2 = [r2]\n"	/* load function descriptor address */	\
	"mov r17 = ip\n"	/* get ip to calc return address */	\
	"mov r16 = gp\n"	/* save gp */				\
	";;\n"								\
	"}\n"								\
	"{ .mii\n"							\
	"ld8 r3 = [r2], 8\n"	/* load entry address */		\
	"adds r17 = 1f - 1b, r17\n" /* calculate return address */	\
	";;\n"								\
	"mov b7 = r3\n"		/* set entry address */			\
	"}\n"								\
	"{ .mib\n"							\
	"ld8 gp = [r2]\n"	/* load gp value */			\
	"mov b6 = r17\n"	/* set return address */		\
	"br.cond.sptk.few b7\n"	/* intrinsics are very short insns */	\
	"}\n"								\
	"1:\n"								\
	"{ .mii\n"							\
	"mov gp = r16\n"	/* restore gp value */			\
	"nop 0\n"							\
	"nop 0\n"							\
	";;\n"								\
	"}\n"

#define PARAVIRT_OP(op)				\
	[op_addr] "i"(&pv_cpu_ops.op)

#define PARAVIRT_TYPE(type)			\
	PARAVIRT_PATCH_TYPE_ ## type

#define PARAVIRT_REG_CLOBBERS0					\
	"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",	\
	"r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS1					\
	"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",	\
	"r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS2					\
	"r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14",	\
	"r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS5					\
	"r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/	\
	"r15", "r16", "r17"

#define PARAVIRT_BR_CLOBBERS			\
	"b6", "b7"

#define PARAVIRT_PR_CLOBBERS						\
	"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"

#define PARAVIRT_AR_CLOBBERS			\
	"ar.ccv"

#define PARAVIRT_CLOBBERS0			\
	PARAVIRT_REG_CLOBBERS0,			\
	PARAVIRT_BR_CLOBBERS,			\
	PARAVIRT_PR_CLOBBERS,			\
	PARAVIRT_AR_CLOBBERS,			\
	"memory"

#define PARAVIRT_CLOBBERS1			\
	PARAVIRT_REG_CLOBBERS1,			\
	PARAVIRT_BR_CLOBBERS,			\
	PARAVIRT_PR_CLOBBERS,			\
	PARAVIRT_AR_CLOBBERS,			\
	"memory"

#define PARAVIRT_CLOBBERS2			\
	PARAVIRT_REG_CLOBBERS2,			\
	PARAVIRT_BR_CLOBBERS,			\
	PARAVIRT_PR_CLOBBERS,			\
	PARAVIRT_AR_CLOBBERS,			\
	"memory"

#define PARAVIRT_CLOBBERS5			\
	PARAVIRT_REG_CLOBBERS5,			\
	PARAVIRT_BR_CLOBBERS,			\
	PARAVIRT_PR_CLOBBERS,			\
	PARAVIRT_AR_CLOBBERS,			\
	"memory"

#define PARAVIRT_BR0(op, type)						\
	register unsigned long ia64_clobber asm ("r8");			\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,		\
					  PARAVIRT_TYPE(type))		\
		      : "=r"(ia64_clobber)				\
		      : PARAVIRT_OP(op)					\
		      : PARAVIRT_CLOBBERS0)

#define PARAVIRT_BR0_RET(op, type)					\
	register unsigned long ia64_intri_res asm ("r8");		\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,		\
					  PARAVIRT_TYPE(type))		\
		      : "=r"(ia64_intri_res)				\
		      : PARAVIRT_OP(op)					\
		      : PARAVIRT_CLOBBERS0)

#define PARAVIRT_BR1(op, type, arg1)					\
	register unsigned long __##arg1 asm ("r8") = arg1;		\
	register unsigned long ia64_clobber asm ("r8");			\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,		\
					  PARAVIRT_TYPE(type))		\
		      : "=r"(ia64_clobber)				\
		      : PARAVIRT_OP(op), "0"(__##arg1)			\
		      : PARAVIRT_CLOBBERS1)

#define PARAVIRT_BR1_RET(op, type, arg1)				\
	register unsigned long ia64_intri_res asm ("r8");		\
	register unsigned long __##arg1 asm ("r8") = arg1;		\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,		\
					  PARAVIRT_TYPE(type))		\
		      : "=r"(ia64_intri_res)				\
		      : PARAVIRT_OP(op), "0"(__##arg1)			\
		      : PARAVIRT_CLOBBERS1)

#define PARAVIRT_BR1_VOID(op, type, arg1)				\
	register void *__##arg1 asm ("r8") = arg1;			\
	register unsigned long ia64_clobber asm ("r8");			\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,		\
					  PARAVIRT_TYPE(type))		\
		      : "=r"(ia64_clobber)				\
		      : PARAVIRT_OP(op), "0"(__##arg1)			\
		      : PARAVIRT_CLOBBERS1)

#define PARAVIRT_BR2(op, type, arg1, arg2)				\
	register unsigned long __##arg1 asm ("r8") = arg1;		\
	register unsigned long __##arg2 asm ("r9") = arg2;		\
	register unsigned long ia64_clobber1 asm ("r8");		\
	register unsigned long ia64_clobber2 asm ("r9");		\
	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,		\
					  PARAVIRT_TYPE(type))		\
		      : "=r"(ia64_clobber1), "=r"(ia64_clobber2)	\
		      : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2)	\
		      : PARAVIRT_CLOBBERS2)

#define PARAVIRT_DEFINE_CPU_OP0(op, type)		\
	static inline void				\
	paravirt_ ## op (void)				\
	{						\
		PARAVIRT_BR0(op, type);			\
	}

#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type)		\
	static inline unsigned long			\
	paravirt_ ## op (void)				\
	{						\
		PARAVIRT_BR0_RET(op, type);		\
		return ia64_intri_res;			\
	}

#define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type)		\
	static inline void				\
	paravirt_ ## op (void *arg1)			\
	{						\
		PARAVIRT_BR1_VOID(op, type, arg1);	\
	}

#define PARAVIRT_DEFINE_CPU_OP1(op, type)		\
	static inline void				\
	paravirt_ ## op (unsigned long arg1)		\
	{						\
		PARAVIRT_BR1(op, type, arg1);		\
	}

#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type)		\
	static inline unsigned long			\
	paravirt_ ## op (unsigned long arg1)		\
	{						\
		PARAVIRT_BR1_RET(op, type, arg1);	\
		return ia64_intri_res;			\
	}

#define PARAVIRT_DEFINE_CPU_OP2(op, type)		\
	static inline void				\
	paravirt_ ## op (unsigned long arg1,		\
			 unsigned long arg2)		\
	{						\
		PARAVIRT_BR2(op, type, arg1, arg2);	\
	}

PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC);
PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)
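
/*
 * Illustrative usage (a sketch, hypothetical locals): the wrappers
 * defined above are called like ordinary functions, e.g.
 *
 *	unsigned long hash = paravirt_thash(vaddr);
 *	paravirt_ptcga(vaddr, page_shift << 2);	// size in ptc.ga format
 *
 * and each call site carries a patch tag so the binary patcher can
 * later rewrite the stub in place.
 */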

static inline void
paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
			unsigned long val2, unsigned long val3,
			unsigned long val4)
{
	register unsigned long __val0 asm ("r8") = val0;
	register unsigned long __val1 asm ("r9") = val1;
	register unsigned long __val2 asm ("r10") = val2;
	register unsigned long __val3 asm ("r11") = val3;
	register unsigned long __val4 asm ("r14") = val4;

	register unsigned long ia64_clobber0 asm ("r8");
	register unsigned long ia64_clobber1 asm ("r9");
	register unsigned long ia64_clobber2 asm ("r10");
	register unsigned long ia64_clobber3 asm ("r11");
	register unsigned long ia64_clobber4 asm ("r14");

	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
					  PARAVIRT_TYPE(SET_RR0_TO_RR4))
		      : "=r"(ia64_clobber0),
			"=r"(ia64_clobber1),
			"=r"(ia64_clobber2),
			"=r"(ia64_clobber3),
			"=r"(ia64_clobber4)
		      : PARAVIRT_OP(set_rr0_to_rr4),
			"0"(__val0), "1"(__val1), "2"(__val2),
			"3"(__val3), "4"(__val4)
		      : PARAVIRT_CLOBBERS5);
}

/* unsigned long paravirt_getreg(int reg) */
#define __paravirt_getreg(reg)						\
	({								\
		register unsigned long ia64_intri_res asm ("r8");	\
		register unsigned long __reg asm ("r8") = (reg);	\
									\
		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
						  PARAVIRT_TYPE(GETREG)	\
						  + (reg))		\
			      : "=r"(ia64_intri_res)			\
			      : PARAVIRT_OP(getreg), "0"(__reg)		\
			      : PARAVIRT_CLOBBERS1);			\
									\
		ia64_intri_res;						\
	})

/* void paravirt_setreg(int reg, unsigned long val) */
#define paravirt_setreg(reg, val)					\
	do {								\
		register unsigned long __val asm ("r8") = val;		\
		register unsigned long __reg asm ("r9") = reg;		\
		register unsigned long ia64_clobber1 asm ("r8");	\
		register unsigned long ia64_clobber2 asm ("r9");	\
									\
		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
						  PARAVIRT_TYPE(SETREG)	\
						  + (reg))		\
			      : "=r"(ia64_clobber1),			\
				"=r"(ia64_clobber2)			\
			      : PARAVIRT_OP(setreg),			\
				"1"(__reg), "0"(__val)			\
			      : PARAVIRT_CLOBBERS2);			\
	} while (0)
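
/*
 * Illustrative usage (a sketch, hypothetical value names): accesses to
 * privileged registers go through these stubs so a hypervisor can
 * intercept them, e.g.
 *
 *	paravirt_setreg(_IA64_REG_CR_ITM, next_timer_tick);
 *	unsigned long iva = paravirt_getreg(_IA64_REG_CR_IVA);
 */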

#endif /* ASM_SUPPORTED */
#endif /* CONFIG_PARAVIRT */

#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */