#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 sprg's */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
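/* SL_R12 holds r12..r31: 20 registers * 4 bytes = the 80 bytes above */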

	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE


	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)

	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)
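	/* That is all the non-volatile state we need: we are entered as an
	 * ordinary C function, so the volatile registers may be clobbered
	 * freely (stmw sweeps r12-r31 in a single instruction).
	 */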

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it. TBU is read again after TBL;
	 * if the two TBU samples differ, TBL carried over in between and
	 * we retry the whole read.
	 */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)

#if 0
	/* Back up various CPU configuration registers */
	bl	__save_cpu_setup
#endif

	/* Call the low level suspend stuff (we should probably have made
	 * a stack frame...)
	 */
	bl	swsusp_save
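	/* swsusp_save() should return 0 on success; whatever it returns is
	 * still in r3 and is handed back to our caller below.
	 */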

	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr

/* Resume code */
_GLOBAL(swsusp_arch_resume)

	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * be unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings;
	 * disabling the MMU completely isn't a good option for
	 * performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 in this case; we should investigate moving the
	 * BATs around for these CPUs.)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync

	/* Load the pointer to the list of pages to copy into r10 */
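	/* restore_pblist is a kernel virtual address; biasing only the high
	 * half by -KERNELBASE yields its physical address, since KERNELBASE
	 * is 64KB-aligned and the low 16 bits are unaffected.
	 */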
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache efficient.
	 */
1:
	tophys(r3,r10)
	li	r0,256
	mtctr	r0
	lwz	r11,pbe_address(r3)		/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
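	/* 256 iterations of 4 words (16 bytes) each = one 4KB page */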
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b

	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b

	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync
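	/* Both loops above walk 0x20000 lines at a 32-byte stride (4MB),
	 * which assumes 32-byte L1 cache lines, as on the 32-bit CPUs
	 * this code targets.
	 */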

	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical.
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

#if 0
	/* Restore various CPU configuration registers */
	bl	__restore_cpu_setup
#endif

	/* Restore the BATs and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are running from those BATs,
	 * but our code is probably in the icache, and we are
	 * writing the same values back to the BATs, so it should
	 * be fine, though a better solution will have to be found
	 * long-term.
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
#endif
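	/* CPUs with CPU_FTR_HAS_HIGH_BATS (e.g. 745x) have eight BAT pairs;
	 * clear BATs 4-7 so no stale translations survive the resume.
	 */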
BEGIN_FTR_SECTION
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)

	/* Flush all TLBs: step the effective address one page at a time
	 * through a 256MB range, which hits every TLB congruence class.
	 */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync

	/* restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
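	/* turn_on_mmu returns here via rfi with the saved MSR in force,
	 * so from this point on translation is enabled again.
	 */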
	tovirt(r11,r11)

	/* Restore the timebase: zero TBL first so a carry out of TBL
	 * cannot bump TBU between the two writes.
	 */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4

	/* Kick decrementer */
	li	r0,1
	mtdec	r0
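	/* Writing 1 makes the decrementer fire almost immediately, so the
	 * timer code gets a chance to resynchronize as soon as interrupts
	 * are re-enabled.
	 */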

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	// XXX Note: we don't really need to call swsusp_resume

	li	r3,0
	blr

/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
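/* Load SRR0 with our return address (LR) and SRR1 with the target MSR;
 * rfi then jumps back to the caller with that MSR, i.e. with
 * translation re-enabled.
 */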
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi