/* hvCall.S */
/*
 * This file contains the generic code to perform a call to the
 * pSeries LPAR hypervisor.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

/*
 * Offset within the caller's stack frame of the parameter-save slot
 * for argument register rN (N in r3..r10): a 48-byte frame header is
 * followed by one 8-byte doubleword per GPR argument, starting at r3.
 */
#define STK_PARM(i) (48 + ((i)-3)*8)
#ifdef CONFIG_TRACEPOINTS

	/*
	 * Count of active hcall tracepoint consumers, kept in the TOC so
	 * it is reachable via r2.  Zero means tracing is disabled and the
	 * instrumentation below is skipped.
	 */
	.section	".toc","aw"

	.globl hcall_tracepoint_refcount
hcall_tracepoint_refcount:
	.llong	0

	.section	".text"

/*
 * precall must preserve all registers. use unused STK_PARM()
 * areas to save snapshots and opcode. We branch around this
 * in early init (eg when populating the MMU hashtable) by using an
 * unconditional cpu feature.
 *
 * When the refcount is non-zero this calls
 *   __trace_hcall_entry(r3 = opcode, r4 = &saved args at FIRST_REG's slot)
 * and then restores r3-r10 so the hypervisor call proper sees every
 * argument register unchanged.
 */
#define HCALL_INST_PRECALL(FIRST_REG)				\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	/* fast-path out while no tracepoint is registered */	\
	ld	r12,hcall_tracepoint_refcount@toc(r2);		\
	cmpdi	r12,0;						\
	beq+	1f;						\
	mflr	r0;						\
	/* snapshot all argument regs into the caller's */	\
	/* parameter save area (unused by our caller) */	\
	std	r3,STK_PARM(r3)(r1);				\
	std	r4,STK_PARM(r4)(r1);				\
	std	r5,STK_PARM(r5)(r1);				\
	std	r6,STK_PARM(r6)(r1);				\
	std	r7,STK_PARM(r7)(r1);				\
	std	r8,STK_PARM(r8)(r1);				\
	std	r9,STK_PARM(r9)(r1);				\
	std	r10,STK_PARM(r10)(r1);				\
	std	r0,16(r1);					\
	/* r4 = pointer to the saved hcall arguments */		\
	addi	r4,r1,STK_PARM(FIRST_REG);			\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
	bl	.__trace_hcall_entry;				\
	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
	ld	r0,16(r1);					\
	/* restore every volatile reg the trace call clobbered */ \
	ld	r3,STK_PARM(r3)(r1);				\
	ld	r4,STK_PARM(r4)(r1);				\
	ld	r5,STK_PARM(r5)(r1);				\
	ld	r6,STK_PARM(r6)(r1);				\
	ld	r7,STK_PARM(r7)(r1);				\
	ld	r8,STK_PARM(r8)(r1);				\
	ld	r9,STK_PARM(r9)(r1);				\
	ld	r10,STK_PARM(r10)(r1);				\
	mtlr	r0;						\
1:

/*
 * postcall is performed immediately before function return which
 * allows liberal use of volatile registers. We branch around this
 * in early init (eg when populating the MMU hashtable) by using an
 * unconditional cpu feature.
 *
 * Calls __trace_hcall_exit(r3 = opcode (saved by precall),
 * r4 = hcall status, r5 = return buffer — set up by the wrappers
 * below) and preserves the status in r3 across the call.
 */
#define __HCALL_INST_POSTCALL					\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	/* skip when no tracepoint is registered */		\
	ld	r12,hcall_tracepoint_refcount@toc(r2);		\
	cmpdi	r12,0;						\
	beq+	1f;						\
	mflr	r0;						\
	/* r6 = opcode saved by precall; park the hcall */	\
	/* status in its slot so it survives the call */	\
	ld	r6,STK_PARM(r3)(r1);				\
	std	r3,STK_PARM(r3)(r1);				\
	mr	r4,r3;						\
	mr	r3,r6;						\
	std	r0,16(r1);					\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
	bl	.__trace_hcall_exit;				\
	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
	ld	r0,16(r1);					\
	/* reload the hcall status for the caller */		\
	ld	r3,STK_PARM(r3)(r1);				\
	mtlr	r0;						\
1:

/* Variant for hcalls with no return buffer: r5 (retbuf) = NULL. */
#define HCALL_INST_POSTCALL_NORETS				\
	li	r5,0;						\
	__HCALL_INST_POSTCALL

/* Variant for hcalls with a return buffer held in BUFREG. */
#define HCALL_INST_POSTCALL(BUFREG)				\
	mr	r5,BUFREG;					\
	__HCALL_INST_POSTCALL

#else
/* Tracepoints disabled: instrumentation compiles away to nothing. */
#define HCALL_INST_PRECALL(FIRST_ARG)
#define HCALL_INST_POSTCALL_NORETS
#define HCALL_INST_POSTCALL(BUFREG)
#endif
	.text

/*
 * long plpar_hcall_norets(unsigned long opcode, ...);
 *
 * Hypervisor call that returns only a status: r3 = opcode,
 * r4-r10 = arguments.  The status comes back in r3.  CR is saved
 * and restored around HVSC since the hypervisor may clobber it.
 */
_GLOBAL(plpar_hcall_norets)
	HMT_MEDIUM			/* normal SMT thread priority */

	mfcr	r0
	stw	r0,8(r1)		/* preserve CR across the hcall */

	HCALL_INST_PRECALL(r4)		/* args for tracing start at r4 */

	HVSC				/* invoke the hypervisor */

	HCALL_INST_POSTCALL_NORETS

	lwz	r0,8(r1)
	mtcrf	0xff,r0			/* restore all CR fields */
	blr				/* return r3 = status */
/*
 * long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * Hypervisor call with up to 6 inputs (r5-r10) and 4 return values.
 * r3 = opcode, r4 = retbuf; the inputs are shifted down into r4-r9
 * for the hcall and the 4 results (r4-r7 after HVSC) are stored into
 * retbuf.  Status is returned in r3.
 */
_GLOBAL(plpar_hcall)
	HMT_MEDIUM			/* normal SMT thread priority */

	mfcr	r0
	stw	r0,8(r1)		/* preserve CR across the hcall */

	HCALL_INST_PRECALL(r5)		/* args for tracing start at r5 */

	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */

	/* shift the inputs down one register: hcall args go in r4-r9 */
	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARM(r4)(r1)	/* r12 = retbuf */
	std	r4, 0(r12)		/* store the 4 return values */
	std	r5, 8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	HCALL_INST_POSTCALL(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0			/* restore all CR fields */
	blr				/* return r3 = status */
/*
 * plpar_hcall_raw can be called in real mode. kexec/kdump need some
 * hypervisor calls to be executed in real mode. So plpar_hcall_raw
 * does not access the per cpu hypervisor call statistics variables,
 * since these variables may not be present in the RMO region.
 *
 * Same register contract as plpar_hcall (r3 = opcode, r4 = retbuf,
 * r5-r10 = inputs, 4 results stored to retbuf, status in r3) but
 * with no tracing instrumentation at all.
 */
_GLOBAL(plpar_hcall_raw)
	HMT_MEDIUM			/* normal SMT thread priority */

	mfcr	r0
	stw	r0,8(r1)		/* preserve CR across the hcall */

	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */

	/* shift the inputs down one register: hcall args go in r4-r9 */
	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARM(r4)(r1)	/* r12 = retbuf */
	std	r4, 0(r12)		/* store the 4 return values */
	std	r5, 8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0			/* restore all CR fields */
	blr				/* return r3 = status */
/*
 * long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * Hypervisor call with up to 9 inputs and 9 return values.
 * r3 = opcode, r4 = retbuf, r5-r10 = args 1-6; args 7-9 arrive in the
 * caller's stack parameter-save slots for r11-r13 and are loaded into
 * r10-r12 for the hcall.  After HVSC the 9 results are in r4-r12 and
 * are stored into retbuf; status is returned in r3.
 */
_GLOBAL(plpar_hcall9)
	HMT_MEDIUM			/* normal SMT thread priority */

	mfcr	r0
	stw	r0,8(r1)		/* preserve CR across the hcall */

	HCALL_INST_PRECALL(r5)		/* args for tracing start at r5 */

	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */

	/* shift inputs down one register: hcall args go in r4-r12 */
	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARM(r11)(r1)	/* put arg7 in R10 */
	ld	r11,STK_PARM(r12)(r1)	/* put arg8 in R11 */
	ld	r12,STK_PARM(r13)(r1)	/* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12			/* stash result 9: r12 is needed */
					/* to address the ret buffer */
	ld	r12,STK_PARM(r4)(r1)	/* r12 = retbuf */
	std	r4, 0(r12)		/* store the 9 return values */
	std	r5, 8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	HCALL_INST_POSTCALL(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0			/* restore all CR fields */
	blr				/* return r3 = status */