hvCall.S
/*
 * This file contains the generic code to perform a call to the
 * pSeries LPAR hypervisor.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

#define STK_PARM(i)	(48 + ((i)-3)*8)
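
/*
 * STK_PARM(rN) is the offset, relative to the stack pointer at entry, of
 * the 64-bit ELF ABI parameter save area slot corresponding to argument
 * register rN: a fixed 48-byte frame header is followed by one doubleword
 * per argument, so STK_PARM(r3) = 48, STK_PARM(r4) = 56, ... up to
 * STK_PARM(r10) = 104.  Indices beyond r10 (offsets 112-128) name the
 * slots of arguments that the C caller had to pass on the stack, which
 * plpar_hcall9 below loads into r10-r12.
 */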
#ifdef CONFIG_TRACEPOINTS

	.section	".toc","aw"

	.globl hcall_tracepoint_refcount
hcall_tracepoint_refcount:
	.llong	0

	.section	".text"

/*
 * precall must preserve all registers. use unused STK_PARM()
 * areas to save snapshots and opcode. We branch around this
 * in early init (eg when populating the MMU hashtable) by using an
 * unconditional cpu feature.
 */
#define HCALL_INST_PRECALL					\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	ld	r12,hcall_tracepoint_refcount@toc(r2);		\
	cmpdi	r12,0;						\
	beq+	1f;						\
	mflr	r0;						\
	std	r3,STK_PARM(r3)(r1);				\
	std	r4,STK_PARM(r4)(r1);				\
	std	r5,STK_PARM(r5)(r1);				\
	std	r6,STK_PARM(r6)(r1);				\
	std	r7,STK_PARM(r7)(r1);				\
	std	r8,STK_PARM(r8)(r1);				\
	std	r9,STK_PARM(r9)(r1);				\
	std	r10,STK_PARM(r10)(r1);				\
	std	r0,16(r1);					\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
	bl	.__trace_hcall_entry;				\
	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
	ld	r0,16(r1);					\
	ld	r3,STK_PARM(r3)(r1);				\
	ld	r4,STK_PARM(r4)(r1);				\
	ld	r5,STK_PARM(r5)(r1);				\
	ld	r6,STK_PARM(r6)(r1);				\
	ld	r7,STK_PARM(r7)(r1);				\
	ld	r8,STK_PARM(r8)(r1);				\
	ld	r9,STK_PARM(r9)(r1);				\
	ld	r10,STK_PARM(r10)(r1);				\
	mtlr	r0;						\
1:

/*
 * postcall is performed immediately before function return which
 * allows liberal use of volatile registers. We branch around this
 * in early init (eg when populating the MMU hashtable) by using an
 * unconditional cpu feature.
 */
#define HCALL_INST_POSTCALL					\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	ld	r12,hcall_tracepoint_refcount@toc(r2);		\
	cmpdi	r12,0;						\
	beq+	1f;						\
	mflr	r0;						\
	ld	r6,STK_PARM(r3)(r1);				\
	std	r3,STK_PARM(r3)(r1);				\
	mr	r4,r3;						\
	mr	r3,r6;						\
	std	r0,16(r1);					\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
	bl	.__trace_hcall_exit;				\
	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
	ld	r0,16(r1);					\
	ld	r3,STK_PARM(r3)(r1);				\
	mtlr	r0;						\
1:
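
/*
 * Register setup for the tracing calls above, as a C-level sketch derived
 * from the register shuffling in the macros (see the actual prototypes in
 * the pseries tracing code): __trace_hcall_entry is invoked with the hcall
 * opcode still in r3, and __trace_hcall_exit is invoked with the saved
 * opcode in r3 and the hcall status in r4, i.e. consistent with
 *
 *	void __trace_hcall_entry(unsigned long opcode);
 *	void __trace_hcall_exit(long opcode, unsigned long retval);
 */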
#else
#define HCALL_INST_PRECALL
#define HCALL_INST_POSTCALL
#endif

	.text

_GLOBAL(plpar_hcall_norets)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_INST_PRECALL

	HVSC				/* invoke the hypervisor */

	HCALL_INST_POSTCALL

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr				/* return r3 = status */
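
/*
 * C-level view of plpar_hcall_norets (for illustration only; the
 * authoritative prototype lives in asm/hvcall.h):
 *
 *	long plpar_hcall_norets(unsigned long opcode, ...);
 *
 * The opcode arrives in r3 and any arguments in r4-r10, already laid out
 * as the hypervisor expects, so no register shuffling is needed and the
 * hcall status comes straight back in r3, e.g.:
 *
 *	long rc = plpar_hcall_norets(H_CEDE);
 *	if (rc != H_SUCCESS)
 *		...
 */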
_GLOBAL(plpar_hcall)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_INST_PRECALL

	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARM(r4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	HCALL_INST_POSTCALL

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */
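
/*
 * C-level view of plpar_hcall (for illustration only; see asm/hvcall.h):
 *
 *	long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * retbuf must point at an unsigned long[PLPAR_HCALL_BUFSIZE] (4 entry)
 * array; the code above copies the hypervisor's r4-r7 return values into
 * it after the HVSC while the status in r3 is returned as usual, e.g.:
 *
 *	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 *	long rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, termno);
 */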
/*
 * plpar_hcall_raw can be called in real mode. kexec/kdump need some
 * hypervisor calls to be executed in real mode. So plpar_hcall_raw
 * skips the HCALL_INST_PRECALL/POSTCALL instrumentation and does not
 * access the per cpu hypervisor call statistics variables, since these
 * variables may not be present in the RMO region.
 */
_GLOBAL(plpar_hcall_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARM(r4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */
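
/*
 * C-level view of plpar_hcall_raw (for illustration only; see
 * asm/hvcall.h):
 *
 *	long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * The calling convention and the four-entry retbuf filled from r4-r7 match
 * plpar_hcall above; only the instrumentation is omitted, so the routine
 * stays safe in real mode.
 */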
_GLOBAL(plpar_hcall9)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_INST_PRECALL

	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARM(r11)(r1)	/* put arg7 in R10 */
	ld	r11,STK_PARM(r12)(r1)	/* put arg8 in R11 */
	ld	r12,STK_PARM(r13)(r1)	/* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12
	ld	r12,STK_PARM(r4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	HCALL_INST_POSTCALL

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */
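
/*
 * C-level view of plpar_hcall9 (for illustration only; see asm/hvcall.h):
 *
 *	long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * retbuf must point at an unsigned long[PLPAR_HCALL9_BUFSIZE] (9 entry)
 * array.  Up to nine arguments are passed (the last three fetched from the
 * caller's parameter save area above), and the nine return values in
 * r4-r12 are copied into retbuf before the status in r3 is returned.
 */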