/* arch/ppc/kernel/cpu_setup_power4.S */
  1. /*
  2. * This file contains low level CPU setup functions.
  3. * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License
  7. * as published by the Free Software Foundation; either version
  8. * 2 of the License, or (at your option) any later version.
  9. *
  10. */
  11. #include <linux/config.h>
  12. #include <asm/processor.h>
  13. #include <asm/page.h>
  14. #include <asm/ppc_asm.h>
  15. #include <asm/cputable.h>
  16. #include <asm/asm-offsets.h>
  17. #include <asm/cache.h>
  18. _GLOBAL(__970_cpu_preinit)
  19. /*
  20. * Deal only with PPC970 and PPC970FX.
  21. */
  22. mfspr r0,SPRN_PVR
  23. srwi r0,r0,16
  24. cmpwi cr0,r0,0x39
  25. cmpwi cr1,r0,0x3c
  26. cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
  27. bnelr
  28. /* Make sure HID4:rm_ci is off before MMU is turned off, that large
  29. * pages are enabled with HID4:61 and clear HID5:DCBZ_size and
  30. * HID5:DCBZ32_ill
  31. */
  32. li r0,0
  33. mfspr r11,SPRN_HID4
  34. rldimi r11,r0,40,23 /* clear bit 23 (rm_ci) */
  35. rldimi r11,r0,2,61 /* clear bit 61 (lg_pg_en) */
  36. sync
  37. mtspr SPRN_HID4,r11
  38. isync
  39. sync
  40. mfspr r11,SPRN_HID5
  41. rldimi r11,r0,6,56 /* clear bits 56 & 57 (DCBZ*) */
  42. sync
  43. mtspr SPRN_HID5,r11
  44. isync
  45. sync
  46. /* Setup some basic HID1 features */
  47. mfspr r0,SPRN_HID1
  48. li r11,0x1200 /* enable i-fetch cacheability */
  49. sldi r11,r11,44 /* and prefetch */
  50. or r0,r0,r11
  51. mtspr SPRN_HID1,r0
  52. mtspr SPRN_HID1,r0
  53. isync
  54. /* Clear HIOR */
  55. li r0,0
  56. sync
  57. mtspr SPRN_HIOR,0 /* Clear interrupt prefix */
  58. isync
  59. blr
  60. _GLOBAL(__setup_cpu_ppc970)
  61. mfspr r0,SPRN_HID0
  62. li r11,5 /* clear DOZE and SLEEP */
  63. rldimi r0,r11,52,8 /* set NAP and DPM */
  64. mtspr SPRN_HID0,r0
  65. mfspr r0,SPRN_HID0
  66. mfspr r0,SPRN_HID0
  67. mfspr r0,SPRN_HID0
  68. mfspr r0,SPRN_HID0
  69. mfspr r0,SPRN_HID0
  70. mfspr r0,SPRN_HID0
  71. sync
  72. isync
  73. blr
  74. /* Definitions for the table use to save CPU states */
  75. #define CS_HID0 0
  76. #define CS_HID1 8
  77. #define CS_HID4 16
  78. #define CS_HID5 24
  79. #define CS_SIZE 32
  80. .data
  81. .balign L1_CACHE_BYTES
  82. cpu_state_storage:
  83. .space CS_SIZE
  84. .balign L1_CACHE_BYTES,0
  85. .text
  86. /* Called in normal context to backup CPU 0 state. This
  87. * does not include cache settings. This function is also
  88. * called for machine sleep. This does not include the MMU
  89. * setup, BATs, etc... but rather the "special" registers
  90. * like HID0, HID1, HID4, etc...
  91. */
  92. _GLOBAL(__save_cpu_setup)
  93. /* Some CR fields are volatile, we back it up all */
  94. mfcr r7
  95. /* Get storage ptr */
  96. lis r5,cpu_state_storage@h
  97. ori r5,r5,cpu_state_storage@l
  98. /* We only deal with 970 for now */
  99. mfspr r0,SPRN_PVR
  100. srwi r0,r0,16
  101. cmpwi cr0,r0,0x39
  102. cmpwi cr1,r0,0x3c
  103. cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
  104. bne 1f
  105. /* Save HID0,1,4 and 5 */
  106. mfspr r3,SPRN_HID0
  107. std r3,CS_HID0(r5)
  108. mfspr r3,SPRN_HID1
  109. std r3,CS_HID1(r5)
  110. mfspr r3,SPRN_HID4
  111. std r3,CS_HID4(r5)
  112. mfspr r3,SPRN_HID5
  113. std r3,CS_HID5(r5)
  114. 1:
  115. mtcr r7
  116. blr
  117. /* Called with no MMU context (typically MSR:IR/DR off) to
  118. * restore CPU state as backed up by the previous
  119. * function. This does not include cache setting
  120. */
  121. _GLOBAL(__restore_cpu_setup)
  122. /* Some CR fields are volatile, we back it up all */
  123. mfcr r7
  124. /* Get storage ptr */
  125. lis r5,(cpu_state_storage-KERNELBASE)@h
  126. ori r5,r5,cpu_state_storage@l
  127. /* We only deal with 970 for now */
  128. mfspr r0,SPRN_PVR
  129. srwi r0,r0,16
  130. cmpwi cr0,r0,0x39
  131. cmpwi cr1,r0,0x3c
  132. cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
  133. bne 1f
  134. /* Clear interrupt prefix */
  135. li r0,0
  136. sync
  137. mtspr SPRN_HIOR,0
  138. isync
  139. /* Restore HID0 */
  140. ld r3,CS_HID0(r5)
  141. sync
  142. isync
  143. mtspr SPRN_HID0,r3
  144. mfspr r3,SPRN_HID0
  145. mfspr r3,SPRN_HID0
  146. mfspr r3,SPRN_HID0
  147. mfspr r3,SPRN_HID0
  148. mfspr r3,SPRN_HID0
  149. mfspr r3,SPRN_HID0
  150. sync
  151. isync
  152. /* Restore HID1 */
  153. ld r3,CS_HID1(r5)
  154. sync
  155. isync
  156. mtspr SPRN_HID1,r3
  157. mtspr SPRN_HID1,r3
  158. sync
  159. isync
  160. /* Restore HID4 */
  161. ld r3,CS_HID4(r5)
  162. sync
  163. isync
  164. mtspr SPRN_HID4,r3
  165. sync
  166. isync
  167. /* Restore HID5 */
  168. ld r3,CS_HID5(r5)
  169. sync
  170. isync
  171. mtspr SPRN_HID5,r3
  172. sync
  173. isync
  174. 1:
  175. mtcr r7
  176. blr