/* idle_6xx.S */
/*
 * This file contains the power_save function for 6xx & 7xxx CPUs
 * rewritten in assembler
 *
 * Warning ! This code assumes that if your machine has a 750fx
 * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
 * If this is not the case some additional changes will have to
 * be done to check a runtime var (a bit like powersave-nap)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

#undef DEBUG

	.text
  25. /*
  26. * Init idle, called at early CPU setup time from head.S for each CPU
  27. * Make sure no rest of NAP mode remains in HID0, save default
  28. * values for some CPU specific registers. Called with r24
  29. * containing CPU number and r3 reloc offset
  30. */
  31. _GLOBAL(init_idle_6xx)
  32. BEGIN_FTR_SECTION
  33. mfspr r4,SPRN_HID0
  34. rlwinm r4,r4,0,10,8 /* Clear NAP */
  35. mtspr SPRN_HID0, r4
  36. b 1f
  37. END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
  38. blr
  39. 1:
  40. slwi r5,r24,2
  41. add r5,r5,r3
  42. BEGIN_FTR_SECTION
  43. mfspr r4,SPRN_MSSCR0
  44. addis r6,r5, nap_save_msscr0@ha
  45. stw r4,nap_save_msscr0@l(r6)
  46. END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
  47. BEGIN_FTR_SECTION
  48. mfspr r4,SPRN_HID1
  49. addis r6,r5,nap_save_hid1@ha
  50. stw r4,nap_save_hid1@l(r6)
  51. END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
  52. blr
  53. /*
  54. * Here is the power_save_6xx function. This could eventually be
  55. * split into several functions & changing the function pointer
  56. * depending on the various features.
  57. */
  58. _GLOBAL(ppc6xx_idle)
  59. /* Check if we can nap or doze, put HID0 mask in r3
  60. */
  61. lis r3, 0
  62. BEGIN_FTR_SECTION
  63. lis r3,HID0_DOZE@h
  64. END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
  65. BEGIN_FTR_SECTION
  66. /* We must dynamically check for the NAP feature as it
  67. * can be cleared by CPU init after the fixups are done
  68. */
  69. lis r4,cur_cpu_spec@ha
  70. lwz r4,cur_cpu_spec@l(r4)
  71. lwz r4,CPU_SPEC_FEATURES(r4)
  72. andi. r0,r4,CPU_FTR_CAN_NAP
  73. beq 1f
  74. /* Now check if user or arch enabled NAP mode */
  75. lis r4,powersave_nap@ha
  76. lwz r4,powersave_nap@l(r4)
  77. cmpwi 0,r4,0
  78. beq 1f
  79. lis r3,HID0_NAP@h
  80. 1:
  81. END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
  82. cmpwi 0,r3,0
  83. beqlr
  84. /* Clear MSR:EE */
  85. mfmsr r7
  86. rlwinm r0,r7,0,17,15
  87. mtmsr r0
  88. /* Check current_thread_info()->flags */
  89. rlwinm r4,r1,0,0,18
  90. lwz r4,TI_FLAGS(r4)
  91. andi. r0,r4,_TIF_NEED_RESCHED
  92. beq 1f
  93. mtmsr r7 /* out of line this ? */
  94. blr
  95. 1:
  96. /* Some pre-nap cleanups needed on some CPUs */
  97. andis. r0,r3,HID0_NAP@h
  98. beq 2f
  99. BEGIN_FTR_SECTION
  100. /* Disable L2 prefetch on some 745x and try to ensure
  101. * L2 prefetch engines are idle. As explained by errata
  102. * text, we can't be sure they are, we just hope very hard
  103. * that well be enough (sic !). At least I noticed Apple
  104. * doesn't even bother doing the dcbf's here...
  105. */
  106. mfspr r4,SPRN_MSSCR0
  107. rlwinm r4,r4,0,0,29
  108. sync
  109. mtspr SPRN_MSSCR0,r4
  110. sync
  111. isync
  112. lis r4,KERNELBASE@h
  113. dcbf 0,r4
  114. dcbf 0,r4
  115. dcbf 0,r4
  116. dcbf 0,r4
  117. END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
  118. #ifdef DEBUG
  119. lis r6,nap_enter_count@ha
  120. lwz r4,nap_enter_count@l(r6)
  121. addi r4,r4,1
  122. stw r4,nap_enter_count@l(r6)
  123. #endif
  124. 2:
  125. BEGIN_FTR_SECTION
  126. /* Go to low speed mode on some 750FX */
  127. lis r4,powersave_lowspeed@ha
  128. lwz r4,powersave_lowspeed@l(r4)
  129. cmpwi 0,r4,0
  130. beq 1f
  131. mfspr r4,SPRN_HID1
  132. oris r4,r4,0x0001
  133. mtspr SPRN_HID1,r4
  134. 1:
  135. END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
  136. /* Go to NAP or DOZE now */
  137. mfspr r4,SPRN_HID0
  138. lis r5,(HID0_NAP|HID0_SLEEP)@h
  139. BEGIN_FTR_SECTION
  140. oris r5,r5,HID0_DOZE@h
  141. END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
  142. andc r4,r4,r5
  143. or r4,r4,r3
  144. BEGIN_FTR_SECTION
  145. oris r4,r4,HID0_DPM@h /* that should be done once for all */
  146. END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
  147. mtspr SPRN_HID0,r4
  148. BEGIN_FTR_SECTION
  149. DSSALL
  150. sync
  151. END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  152. ori r7,r7,MSR_EE /* Could be ommited (already set) */
  153. oris r7,r7,MSR_POW@h
  154. sync
  155. isync
  156. mtmsr r7
  157. isync
  158. sync
  159. blr
  160. /*
  161. * Return from NAP/DOZE mode, restore some CPU specific registers,
  162. * we are called with DR/IR still off and r2 containing physical
  163. * address of current.
  164. */
  165. _GLOBAL(power_save_6xx_restore)
  166. mfspr r11,SPRN_HID0
  167. rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
  168. cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
  169. BEGIN_FTR_SECTION
  170. rlwinm r11,r11,0,9,7 /* Clear DOZE */
  171. END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
  172. mtspr SPRN_HID0, r11
  173. #ifdef DEBUG
  174. beq cr1,1f
  175. lis r11,(nap_return_count-KERNELBASE)@ha
  176. lwz r9,nap_return_count@l(r11)
  177. addi r9,r9,1
  178. stw r9,nap_return_count@l(r11)
  179. 1:
  180. #endif
  181. rlwinm r9,r1,0,0,18
  182. tophys(r9,r9)
  183. lwz r11,TI_CPU(r9)
  184. slwi r11,r11,2
  185. /* Todo make sure all these are in the same page
  186. * and load r22 (@ha part + CPU offset) only once
  187. */
  188. BEGIN_FTR_SECTION
  189. beq cr1,1f
  190. addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
  191. lwz r9,nap_save_msscr0@l(r9)
  192. mtspr SPRN_MSSCR0, r9
  193. sync
  194. isync
  195. 1:
  196. END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
  197. BEGIN_FTR_SECTION
  198. addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
  199. lwz r9,nap_save_hid1@l(r9)
  200. mtspr SPRN_HID1, r9
  201. END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
  202. b transfer_to_handler_cont
  203. .data
  204. _GLOBAL(nap_save_msscr0)
  205. .space 4*NR_CPUS
  206. _GLOBAL(nap_save_hid1)
  207. .space 4*NR_CPUS
  208. _GLOBAL(powersave_nap)
  209. .long 0
  210. _GLOBAL(powersave_lowspeed)
  211. .long 0
  212. #ifdef DEBUG
  213. _GLOBAL(nap_enter_count)
  214. .space 4
  215. _GLOBAL(nap_return_count)
  216. .space 4
  217. #endif